'
+ if filename.exists():
+ with open(filename, 'r') as expected_file:
+ expected = expected_file.read()
+ # update to pick up latest data
+ if force_update:
+ self.force_update()
+ # force urwid to draw the screen
+ # (the main loop isn't running so this doesn't happen automatically)
+ self.app.loop.draw_screen()
+ # take a screenshot
+ screenshot = self.html_fragment.screenshot_collect()[-1]
+
+ try:
+ if expected != screenshot:
+ # screenshot does not match
+ # => write an html file with the visual diff
+ out = self.test_dir / filename.name
+ with open(out, 'w+') as out_file:
+ out_file.write(
+ format_test_failure(
+ expected,
+ screenshot,
+ description,
+ )
+ )
+ raise Exception(
+ 'Screenshot differs:'
+ '\n* Set "CYLC_UPDATE_SCREENSHOTS=true" to update'
+ f'\n* To debug see: file:////{out}'
+ )
+
+ break
+ except Exception as exc_:
+ exc = exc_
+ # wait a while to allow the updater to do its job
+ sleep(delay)
+ else:
+ if os.environ.get('CYLC_UPDATE_SCREENSHOTS', '').lower() == 'true':
+ with open(filename, 'w+') as expected_file:
+ expected_file.write(screenshot)
+ else:
+ raise exc
+
+ def force_update(self):
+ """Run Tui's update method.
+
+ This is done automatically by compare_screenshot but you may want to
+ call it in a test, e.g. before pressing navigation keys.
+
+ With Rakiura, the Tui event loop is not running so the data is never
+ refreshed.
+
+ You do NOT need to call this method for key presses, but you do need to
+ call this if the data has changed (e.g. if you've changed a task state)
+ OR if you've changed any filters (because filters are handled by the
+ update code).
+ """
+ # flush any prior updates
+ self.app.get_update()
+ # wait for the next update
+ while not self.app.update():
+ pass
+
+ def wait_until_loaded(self, *ids, retries=20):
+ """Wait until the given ID appears in the Tui tree, then expand them.
+
+ Useful for waiting whilst Tui loads a workflow.
+
+ Note, this is a blocking wait with no timeout!
+ """
+ exc = None
+ try:
+ ids = self.app.wait_until_loaded(*ids, retries=retries)
+ except Exception as _exc:
+ exc = _exc
+ if ids:
+ msg = (
+ 'Requested nodes did not appear in Tui after'
+ f' {retries} retries: '
+ + ', '.join(ids)
+ )
+ if exc:
+ msg += f'\n{exc}'
+ self.compare_screenshot(f'fail-{uuid1()}', msg, 1)
+
+
+@pytest.fixture
+def rakiura(test_dir, request, monkeypatch):
+ """Visual regression test framework for Urwid apps.
+
+ Like Cypress but for Tui so named after a NZ island with lots of Tuis.
+
+ When called this yields a RakiuraSession object loaded with test
+ utilities. All tests have default retries to avoid flaky tests.
+
+ Similar to the "start" fixture, which starts a Scheduler without running
+ the main loop, rakiura starts Tui without running the main loop.
+
+ Arguments:
+ workflow_id:
+ The "WORKFLOW" argument of the "cylc tui" command line.
+ size:
+ The virtual terminal size for screenshots as a comma
+ separated string e.g. "80,50" for 80 cols wide by 50 rows tall.
+
+ Returns:
+ A RakiuraSession context manager which provides useful utilities for
+ testing.
+
+ """
+ return _rakiura(test_dir, request, monkeypatch)
+
+
+@pytest.fixture
+def mod_rakiura(test_dir, request, monkeypatch):
+ """Same as rakiura but configured to view module-scoped workflows.
+
+ Note: This is *not* a module-scoped fixture (no need, creating Tui sessions
+ is not especially slow), it is configured to display module-scoped
+ "scheduler" fixtures (which may be more expensive to create/destroy).
+ """
+ return _rakiura(test_dir.parent, request, monkeypatch)
+
+
+def _rakiura(test_dir, request, monkeypatch):
+ # make the workflow and scan update intervals match (more reliable)
+ # and speed things up a little whilst we're at it
+ monkeypatch.setattr(
+ 'cylc.flow.tui.updater.Updater.BASE_UPDATE_INTERVAL',
+ 0.1,
+ )
+ monkeypatch.setattr(
+ 'cylc.flow.tui.updater.Updater.BASE_SCAN_INTERVAL',
+ 0.1,
+ )
+
+ # the user name and the prefix of workflow IDs are both variable
+ # so we patch the render functions to make test output stable
+ def get_display_id(id_):
+ tokens = Tokens(id_)
+ return _get_display_id(
+ tokens.duplicate(
+ user='cylc',
+ workflow=tokens.get('workflow', '').rsplit('/', 1)[-1],
+ ).id
+ )
+ monkeypatch.setattr('cylc.flow.tui.util.ME', 'cylc')
+ monkeypatch.setattr(
+ 'cylc.flow.tui.util._display_workflow_id',
+ lambda data: data['name'].rsplit('/', 1)[-1]
+ )
+ monkeypatch.setattr(
+ 'cylc.flow.tui.overlay._get_display_id',
+ get_display_id,
+ )
+
+ # filter Tui so that only workflows created within our test show up
+ id_base = str(test_dir.relative_to(Path("~/cylc-run").expanduser()))
+ workflow_filter = re.escape(id_base) + r'/.*'
+
+ @contextmanager
+ def _rakiura(workflow_id=None, size='80,50'):
+ screen, html_fragment = configure_screenshot(size)
+ app = TuiApp(screen=screen)
+ with app.main(
+ workflow_id,
+ id_filter=workflow_filter,
+ interactive=False,
+ ):
+ yield RakiuraSession(
+ app,
+ html_fragment,
+ test_dir,
+ request.function.__name__,
+ )
+
+ return _rakiura
diff --git a/tests/integration/tui/screenshots/test_auto_expansion.later-time.html b/tests/integration/tui/screenshots/test_auto_expansion.later-time.html
new file mode 100644
index 00000000000..f5a19fd428d
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_auto_expansion.later-time.html
@@ -0,0 +1,21 @@
+Cylc Tui workflows filtered (W - edit, E - reset)
+
+- ~cylc
+ - one - paused
+ - ̿○ 1
+ ̿○ b
+ - ̿○ 2
+ - ̿○ A
+ ̿○ a
+ ○ b
+
+
+
+
+
+
+
+
+quit: q help: h context: enter tree: - ← + → navigation: ↑ ↓ ↥ ↧ Home End
+filter tasks: T f s r R filter workflows: W E p
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_auto_expansion.on-load.html b/tests/integration/tui/screenshots/test_auto_expansion.on-load.html
new file mode 100644
index 00000000000..df3c9f5c41b
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_auto_expansion.on-load.html
@@ -0,0 +1,21 @@
+Cylc Tui workflows filtered (W - edit, E - reset)
+
+- ~cylc
+ - one - paused
+ - ̿○ 1
+ - ̿○ A
+ ̿○ a
+ ○ b
+
+
+
+
+
+
+
+
+
+
+quit: q help: h context: enter tree: - ← + → navigation: ↑ ↓ ↥ ↧ Home End
+filter tasks: T f s r R filter workflows: W E p
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_errors.list-error.html b/tests/integration/tui/screenshots/test_errors.list-error.html
new file mode 100644
index 00000000000..02448aa0267
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_errors.list-error.html
@@ -0,0 +1,31 @@
+┌────────────────┌────────────────────────────────────────────────┐────────────┐
+│ Error: Somethi│ Error │ │
+│ │ │ │
+│ < Select File │ Something went wrong :( │ > │
+│ │ │ │
+│ │ │ │
+│ │ │ │
+│ │ │ │
+│ │ │ │
+│ │ │ │
+│ │ │ │
+│ │ │ │
+│ │ │ │
+│ │ │ │
+│ │ │ │
+│ │ │ │
+│ │ │ │
+│ │ │ │
+│ │ │ │
+│ │ │ │
+│ │ │ │
+│ │ │ │
+│ │ │ │
+│ │ │ │
+│ │ │ │
+│ │ │ │
+│ │ │ │
+│ │ │ │
+│ q to close │ q to close │ │
+└────────────────└────────────────────────────────────────────────┘────────────┘
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_errors.open-error.html b/tests/integration/tui/screenshots/test_errors.open-error.html
new file mode 100644
index 00000000000..142d0d88c72
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_errors.open-error.html
@@ -0,0 +1,31 @@
+┌──────────────────────────────────────────────────────────────────────────────┐
+│ Error: Something went wrong :( │
+│ │
+│ < Select File > │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ q to close │
+└──────────────────────────────────────────────────────────────────────────────┘
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_job_logs.01-job.out.html b/tests/integration/tui/screenshots/test_job_logs.01-job.out.html
new file mode 100644
index 00000000000..c1c767b98cf
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_job_logs.01-job.out.html
@@ -0,0 +1,31 @@
+┌──────────────────────────────────────────────────────────────────────────────┐
+│ Host: myhost │
+│ Path: mypath │
+│ < Select File > │
+│ │
+│ job: 1/a/01 │
+│ this is a job log │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ q to close │
+└──────────────────────────────────────────────────────────────────────────────┘
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_job_logs.02-job.out.html b/tests/integration/tui/screenshots/test_job_logs.02-job.out.html
new file mode 100644
index 00000000000..0eb94051201
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_job_logs.02-job.out.html
@@ -0,0 +1,31 @@
+┌──────────────────────────────────────────────────────────────────────────────┐
+│ Host: myhost │
+│ Path: mypath │
+│ < Select File > │
+│ │
+│ job: 1/a/02 │
+│ this is a job log │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ q to close │
+└──────────────────────────────────────────────────────────────────────────────┘
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_navigation.cursor-at-bottom-of-screen.html b/tests/integration/tui/screenshots/test_navigation.cursor-at-bottom-of-screen.html
new file mode 100644
index 00000000000..bf5e3812008
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_navigation.cursor-at-bottom-of-screen.html
@@ -0,0 +1,31 @@
+Cylc Tui workflows filtered (W - edit, E - reset)
+
+- ~cylc
+ - one - paused
+ - ̿○ 1
+ + ̿○ A
+ - ̿○ B
+ - ̿○ B1
+ ̿○ b11
+ ̿○ b12
+ - ̿○ B2
+ ̿○ b21
+ ̿○ b22
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+quit: q help: h context: enter tree: - ← + → navigation: ↑ ↓ ↥ ↧ Home End
+filter tasks: T f s r R filter workflows: W E p
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_navigation.family-A-collapsed.html b/tests/integration/tui/screenshots/test_navigation.family-A-collapsed.html
new file mode 100644
index 00000000000..cefab5264f4
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_navigation.family-A-collapsed.html
@@ -0,0 +1,31 @@
+Cylc Tui workflows filtered (W - edit, E - reset)
+
+- ~cylc
+ - one - paused
+ - ̿○ 1
+ + ̿○ A
+ - ̿○ B
+ - ̿○ B1
+ ̿○ b11
+ ̿○ b12
+ - ̿○ B2
+ ̿○ b21
+ ̿○ b22
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+quit: q help: h context: enter tree: - ← + → navigation: ↑ ↓ ↥ ↧ Home End
+filter tasks: T f s r R filter workflows: W E p
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_navigation.on-load.html b/tests/integration/tui/screenshots/test_navigation.on-load.html
new file mode 100644
index 00000000000..a0bd107742b
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_navigation.on-load.html
@@ -0,0 +1,31 @@
+Cylc Tui workflows filtered (W - edit, E - reset)
+
+- ~cylc
+ + one - paused
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+quit: q help: h context: enter tree: - ← + → navigation: ↑ ↓ ↥ ↧ Home End
+filter tasks: T f s r R filter workflows: W E p
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_navigation.workflow-expanded.html b/tests/integration/tui/screenshots/test_navigation.workflow-expanded.html
new file mode 100644
index 00000000000..6b26ced563e
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_navigation.workflow-expanded.html
@@ -0,0 +1,31 @@
+Cylc Tui workflows filtered (W - edit, E - reset)
+
+- ~cylc
+ - one - paused
+ - ̿○ 1
+ - ̿○ A
+ ̿○ a1
+ ̿○ a2
+ - ̿○ B
+ - ̿○ B1
+ ̿○ b11
+ ̿○ b12
+ - ̿○ B2
+ ̿○ b21
+ ̿○ b22
+
+
+
+
+
+
+
+
+
+
+
+
+
+quit: q help: h context: enter tree: - ← + → navigation: ↑ ↓ ↥ ↧ Home End
+filter tasks: T f s r R filter workflows: W E p
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_offline_mutation.clean-command-error.html b/tests/integration/tui/screenshots/test_offline_mutation.clean-command-error.html
new file mode 100644
index 00000000000..88defab9486
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_offline_mutation.clean-command-error.html
@@ -0,0 +1,16 @@
+Cylc Tui work┌────┌────────────────────────────────────────────────┐
+ │ id│ Error │
+- ~cylc │ │ │
+ + one - stop│ Ac│ Error in command cylc clean --yes one │
+ │ < │ mock-stderr │
+ │ │ │
+ │ < │ │
+ │ < │ │
+ │ < │ │
+ │ < │ │
+ │ │ │
+ │ │ │
+ │ │ │
+quit: q help: │ q t│ q to close │ome End
+filter tasks: T└────└────────────────────────────────────────────────┘
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_offline_mutation.clean-mutation-selected.html b/tests/integration/tui/screenshots/test_offline_mutation.clean-mutation-selected.html
new file mode 100644
index 00000000000..f28cced0714
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_offline_mutation.clean-mutation-selected.html
@@ -0,0 +1,16 @@
+Cylc Tui work┌────────────────────────────────────────────────┐
+ │ id: ~cylc/one │
+- ~cylc │ │
+ + one - stop│ Action │
+ │ < (cancel) > │
+ │ │
+ │ < clean > │
+ │ < log > │
+ │ < play > │
+ │ < reinstall-reload > │
+ │ │
+ │ │
+ │ │
+quit: q help: │ q to close │↥ ↧ Home End
+filter tasks: T└────────────────────────────────────────────────┘
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_offline_mutation.stop-all-mutation-selected.html b/tests/integration/tui/screenshots/test_offline_mutation.stop-all-mutation-selected.html
new file mode 100644
index 00000000000..c2355597f78
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_offline_mutation.stop-all-mutation-selected.html
@@ -0,0 +1,16 @@
+Cylc Tui work┌────────────────────────────────────────────────┐
+ │ id: ~cylc/root │
+- ~cylc │ │
+ + one - stop│ Action │
+ │ < (cancel) > │
+ │ │
+ │ < stop-all > │
+ │ │
+ │ │
+ │ │
+ │ │
+ │ │
+ │ │
+quit: q help: │ q to close │↥ ↧ Home End
+filter tasks: T└────────────────────────────────────────────────┘
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_online_mutation.command-failed-client-error.html b/tests/integration/tui/screenshots/test_online_mutation.command-failed-client-error.html
new file mode 100644
index 00000000000..895856c6ea2
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_online_mutation.command-failed-client-error.html
@@ -0,0 +1,16 @@
+Cylc Tui work┌────┌────────────────────────────────────────────────┐
+ │ id│ Error │
+- ~cylc │ │ │
+ - one - paus│ Ac│ Error connecting to workflow: mock error │
+ - ̿○ 1 │ < │ │
+ ̿○ on│ │ │
+ │ < │ │
+ │ < │ │
+ │ < │ │
+ │ < │ │
+ │ < │ │
+ │ < │ │
+ │ │ │
+quit: q help: │ q t│ q to close │ome End
+filter tasks: T└────└────────────────────────────────────────────────┘
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_online_mutation.command-failed-workflow-stopped.html b/tests/integration/tui/screenshots/test_online_mutation.command-failed-workflow-stopped.html
new file mode 100644
index 00000000000..6f9954926ef
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_online_mutation.command-failed-workflow-stopped.html
@@ -0,0 +1,16 @@
+Cylc Tui work┌────┌────────────────────────────────────────────────┐
+ │ id│ Error │
+- ~cylc │ │ │
+ - one - paus│ Ac│ Cannot peform command hold on a stopped │
+ - ̿○ 1 │ < │ workflow │
+ ̿○ on│ │ │
+ │ < │ │
+ │ < │ │
+ │ < │ │
+ │ < │ │
+ │ < │ │
+ │ < │ │
+ │ │ │
+quit: q help: │ q t│ q to close │ome End
+filter tasks: T└────└────────────────────────────────────────────────┘
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_online_mutation.command-failed.html b/tests/integration/tui/screenshots/test_online_mutation.command-failed.html
new file mode 100644
index 00000000000..fae4a429cc6
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_online_mutation.command-failed.html
@@ -0,0 +1,16 @@
+Cylc Tui work┌────┌────────────────────────────────────────────────┐
+ │ id│ Error │
+- ~cylc │ │ │
+ - one - paus│ Ac│ Cannot peform command hold on a stopped │
+ - ̿○ 1 │ < │ workflow │
+ ̿○ on│ │ │
+ │ < │ │
+ │ < │ │
+ │ < │ │
+ │ < │ │
+ │ < │ │
+ │ │ │
+ │ │ │
+quit: q help: │ q t│ q to close │ome End
+filter tasks: T└────└────────────────────────────────────────────────┘
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_online_mutation.hold-mutation-selected.html b/tests/integration/tui/screenshots/test_online_mutation.hold-mutation-selected.html
new file mode 100644
index 00000000000..34be2ffa0ce
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_online_mutation.hold-mutation-selected.html
@@ -0,0 +1,16 @@
+Cylc Tui work┌────────────────────────────────────────────────┐
+ │ id: 1/one │
+- ~cylc │ │
+ - one - paus│ Action │
+ - ̿○ 1 │ < (cancel) > │
+ ̿○ on│ │
+ │ < hold > │
+ │ < kill > │
+ │ < log > │
+ │ < poll > │
+ │ < release > │
+ │ < show > │
+ │ │
+quit: q help: │ q to close │↥ ↧ Home End
+filter tasks: T└────────────────────────────────────────────────┘
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_online_mutation.task-selected.html b/tests/integration/tui/screenshots/test_online_mutation.task-selected.html
new file mode 100644
index 00000000000..7d94d5e43dd
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_online_mutation.task-selected.html
@@ -0,0 +1,16 @@
+Cylc Tui workflows filtered (W - edit, E - reset)
+
+- ~cylc
+ - one - paused
+ - ̿○ 1
+ ̿○ one
+
+
+
+
+
+
+
+quit: q help: h context: enter tree: - ← + → navigation: ↑ ↓ ↥ ↧ Home End
+filter tasks: T f s r R filter workflows: W E p
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_restart_reconnect.1-workflow-running.html b/tests/integration/tui/screenshots/test_restart_reconnect.1-workflow-running.html
new file mode 100644
index 00000000000..74c02508239
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_restart_reconnect.1-workflow-running.html
@@ -0,0 +1,21 @@
+Cylc Tui workflows filtered (W - edit, E - reset)
+
+- ~cylc
+ - one - paused
+ - ̿○ 1
+ ̿○ one
+
+
+
+
+
+
+
+
+
+
+
+
+quit: q help: h context: enter tree: - ← + → navigation: ↑ ↓ ↥ ↧ Home End
+filter tasks: T f s r R filter workflows: W E p
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_restart_reconnect.2-workflow-stopped.html b/tests/integration/tui/screenshots/test_restart_reconnect.2-workflow-stopped.html
new file mode 100644
index 00000000000..09c3bbd7fb0
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_restart_reconnect.2-workflow-stopped.html
@@ -0,0 +1,21 @@
+Cylc Tui workflows filtered (W - edit, E - reset)
+
+- ~cylc
+ - one - stopped
+ Workflow is not running
+
+
+
+
+
+
+
+
+
+
+
+
+
+quit: q help: h context: enter tree: - ← + → navigation: ↑ ↓ ↥ ↧ Home End
+filter tasks: T f s r R filter workflows: W E p
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_restart_reconnect.3-workflow-restarted.html b/tests/integration/tui/screenshots/test_restart_reconnect.3-workflow-restarted.html
new file mode 100644
index 00000000000..74c02508239
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_restart_reconnect.3-workflow-restarted.html
@@ -0,0 +1,21 @@
+Cylc Tui workflows filtered (W - edit, E - reset)
+
+- ~cylc
+ - one - paused
+ - ̿○ 1
+ ̿○ one
+
+
+
+
+
+
+
+
+
+
+
+
+quit: q help: h context: enter tree: - ← + → navigation: ↑ ↓ ↥ ↧ Home End
+filter tasks: T f s r R filter workflows: W E p
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_scheduler_logs.log-file-selection.html b/tests/integration/tui/screenshots/test_scheduler_logs.log-file-selection.html
new file mode 100644
index 00000000000..f88e1b0124d
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_scheduler_logs.log-file-selection.html
@@ -0,0 +1,31 @@
+┌──────────────────────────────────────────────────────────────────────────────┐
+│ Host: myhost │
+│ Path: mypath │
+│ < Select File > │
+│ │
+│ this is the │
+│ scheduler log file │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ ┌──────────────────────────────────────┐ │
+│ │ Select File │ │
+│ │ │ │
+│ │ < config/01-start-01.cylc > │ │
+│ │ < config/flow-processed.cylc > │ │
+│ │ < scheduler/01-start-01.log > │ │
+│ │ │ │
+│ │ q to close │ │
+│ └──────────────────────────────────────┘ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ q to close │
+└──────────────────────────────────────────────────────────────────────────────┘
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_scheduler_logs.scheduler-log-file.html b/tests/integration/tui/screenshots/test_scheduler_logs.scheduler-log-file.html
new file mode 100644
index 00000000000..68dbcc10f9c
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_scheduler_logs.scheduler-log-file.html
@@ -0,0 +1,31 @@
+┌──────────────────────────────────────────────────────────────────────────────┐
+│ Host: myhost │
+│ Path: mypath │
+│ < Select File > │
+│ │
+│ this is the │
+│ scheduler log file │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ q to close │
+└──────────────────────────────────────────────────────────────────────────────┘
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_scheduler_logs.workflow-configuration-file.html b/tests/integration/tui/screenshots/test_scheduler_logs.workflow-configuration-file.html
new file mode 100644
index 00000000000..04ebb27ff79
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_scheduler_logs.workflow-configuration-file.html
@@ -0,0 +1,31 @@
+┌──────────────────────────────────────────────────────────────────────────────┐
+│ Host: myhost │
+│ Path: mypath │
+│ < Select File > │
+│ │
+│ [scheduling] │
+│ [[graph]] │
+│ R1 = a │
+│ [runtime] │
+│ [[a]] │
+│ [[root]] │
+│ [[[simulation]]] │
+│ default run length = PT0S │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ q to close │
+└──────────────────────────────────────────────────────────────────────────────┘
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_show.fail.html b/tests/integration/tui/screenshots/test_show.fail.html
new file mode 100644
index 00000000000..f788e5b3a55
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_show.fail.html
@@ -0,0 +1,41 @@
+Cylc Tui workflows┌────────────────────────────────────────────────┐
+ │ Error │
+- ~cylc │ │
+ - one - paused │ :( │
+ - ̿○ 1 │ │
+ ̿○ foo │ │
+ │ │
+ │ │
+ │ │
+ │ │
+ ┌────│ │
+ │ id│ │
+ │ │ │
+ │ Ac│ │
+ │ < │ │
+ │ │ │
+ │ < │ │
+ │ < │ │
+ │ < │ │
+ │ < │ │
+ │ < │ │
+ │ < │ │
+ │ < │ │
+ │ │ │
+ │ │ │
+ │ │ │
+ │ │ │
+ │ │ │
+ │ q t│ │
+ └────│ │
+ │ │
+ │ │
+ │ │
+ │ │
+ │ │
+ │ │
+ │ │
+ │ │
+quit: q help: h co│ q to close │ome End
+filter tasks: T f s └────────────────────────────────────────────────┘
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_show.success.html b/tests/integration/tui/screenshots/test_show.success.html
new file mode 100644
index 00000000000..afdcd1a73b4
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_show.success.html
@@ -0,0 +1,41 @@
+Cylc Tui workflows filtered (W - edit, E - reset)
+
+- ~cylc
+ - one - paused
+ - ̿○ 1
+ ̿○ foo
+
+
+
+
+
+ ┌────────────────────────────────────────────────┐
+ │ title: Foo │
+ │ description: The first metasyntactic │
+ │ variable. │
+ │ URL: (not given) │
+ │ state: waiting │
+ │ prerequisites: (None) │
+ │ outputs: ('-': not completed) │
+ │ - 1/foo expired │
+ │ - 1/foo submitted │
+ │ - 1/foo submit-failed │
+ │ - 1/foo started │
+ │ - 1/foo succeeded │
+ │ - 1/foo failed │
+ │ │
+ │ │
+ │ q to close │
+ └────────────────────────────────────────────────┘
+
+
+
+
+
+
+
+
+
+quit: q help: h context: enter tree: - ← + → navigation: ↑ ↓ ↥ ↧ Home End
+filter tasks: T f s r R filter workflows: W E p
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_subscribe_unsubscribe.subscribed.html b/tests/integration/tui/screenshots/test_subscribe_unsubscribe.subscribed.html
new file mode 100644
index 00000000000..019184ec897
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_subscribe_unsubscribe.subscribed.html
@@ -0,0 +1,16 @@
+Cylc Tui workflows filtered (W - edit, E - reset)
+
+- ~cylc
+ - one - paused
+ - ̿○ 1
+ ̿○ one
+
+
+
+
+
+
+
+quit: q help: h context: enter tree: - ← + → navigation: ↑ ↓ ↥ ↧ Home End
+filter tasks: T f s r R filter workflows: W E p
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_subscribe_unsubscribe.unsubscribed.html b/tests/integration/tui/screenshots/test_subscribe_unsubscribe.unsubscribed.html
new file mode 100644
index 00000000000..8fa0f4329a1
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_subscribe_unsubscribe.unsubscribed.html
@@ -0,0 +1,16 @@
+Cylc Tui workflows filtered (W - edit, E - reset)
+
+- ~cylc
+ + one - paused
+
+
+
+
+
+
+
+
+
+quit: q help: h context: enter tree: - ← + → navigation: ↑ ↓ ↥ ↧ Home End
+filter tasks: T f s r R filter workflows: W E p
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_task_logs.latest-job.err.html b/tests/integration/tui/screenshots/test_task_logs.latest-job.err.html
new file mode 100644
index 00000000000..4814892df7a
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_task_logs.latest-job.err.html
@@ -0,0 +1,31 @@
+┌──────────────────────────────────────────────────────────────────────────────┐
+│ Host: myhost │
+│ Path: mypath │
+│ < Select File > │
+│ │
+│ job: 1/a/02 │
+│ this is a job error │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ q to close │
+└──────────────────────────────────────────────────────────────────────────────┘
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_task_logs.latest-job.out.html b/tests/integration/tui/screenshots/test_task_logs.latest-job.out.html
new file mode 100644
index 00000000000..0eb94051201
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_task_logs.latest-job.out.html
@@ -0,0 +1,31 @@
+┌──────────────────────────────────────────────────────────────────────────────┐
+│ Host: myhost │
+│ Path: mypath │
+│ < Select File > │
+│ │
+│ job: 1/a/02 │
+│ this is a job log │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ │
+│ q to close │
+└──────────────────────────────────────────────────────────────────────────────┘
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_tui_basics.test-rakiura-enter.html b/tests/integration/tui/screenshots/test_tui_basics.test-rakiura-enter.html
new file mode 100644
index 00000000000..d54d9538d26
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_tui_basics.test-rakiura-enter.html
@@ -0,0 +1,41 @@
+Cylc Tui workflows filtered (W - edit, E - reset)
+
+- ~cylc
+
+
+
+
+
+
+
+ ┌────────────────────────────────────────────────┐
+ │ id: ~cylc/root │
+ │ │
+ │ Action │
+ │ < (cancel) > │
+ │ │
+ │ < stop-all > │
+ │ │
+ │ │
+ │ │
+ │ │
+ │ │
+ │ │
+ │ │
+ │ │
+ │ │
+ │ │
+ │ │
+ │ q to close │
+ └────────────────────────────────────────────────┘
+
+
+
+
+
+
+
+
+quit: q help: h context: enter tree: - ← + → navigation: ↑ ↓ ↥ ↧ Home End
+filter tasks: T f s r R filter workflows: W E p
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_tui_basics.test-rakiura-help.html b/tests/integration/tui/screenshots/test_tui_basics.test-rakiura-help.html
new file mode 100644
index 00000000000..1795c586d9a
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_tui_basics.test-rakiura-help.html
@@ -0,0 +1,41 @@
+Cylc Tui ┌──────────────────────────────────────────────────────────┐
+ │ │
+- ~cylc │ _ _ _ │
+ │ | | | | (_) │
+ │ ___ _ _| | ___ | |_ _ _ _ │
+ │ / __| | | | |/ __| | __| | | | | │
+ │ | (__| |_| | | (__ | |_| |_| | | │
+ │ \___|\__, |_|\___| \__|\__,_|_| │
+ │ __/ | │
+ │ |___/ │
+ │ │
+ │ ( scroll using arrow keys ) │
+ │ │
+ │ │
+ │ │
+ │ _,@@@@@@. │
+ │ <=@@@, `@@@@@. │
+ │ `-@@@@@@@@@@@' │
+ │ :@@@@@@@@@@. │
+ │ (.@@@@@@@@@@@ │
+ │ ( '@@@@@@@@@@@@. │
+ │ ;.@@@@@@@@@@@@@@@ │
+ │ '@@@@@@@@@@@@@@@@@@, │
+ │ ,@@@@@@@@@@@@@@@@@@@@' │
+ │ :.@@@@@@@@@@@@@@@@@@@@@. │
+ │ .@@@@@@@@@@@@@@@@@@@@@@@@. │
+ │ '@@@@@@@@@@@@@@@@@@@@@@@@@. │
+ │ ;@@@@@@@@@@@@@@@@@@@@@@@@@@@ │
+ │ .@@@@@@@@@@@@@@@@@@@@@@@@@@. │
+ │ .@@@@@@@@@@@@@@@@@@@@@@@@@@, │
+ │ .@@@@@@@@@@@@@@@@@@@@@@@@@' │
+ │ .@@@@@@@@@@@@@@@@@@@@@@@@' , │
+ │ :@@@@@@@@@@@@@@@@@@@@@..''';,,,;::- │
+ │ '@@@@@@@@@@@@@@@@@@@. `. ` │
+ │ .@@@@@@.: ,.@@@@@@@. ` │
+ │ :@@@@@@@, ;.@, │
+ │ '@@@@@@. `@' │
+ │ │
+quit: q h│ q to close │ome End
+filter tas└──────────────────────────────────────────────────────────┘
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_tui_basics.test-rakiura.html b/tests/integration/tui/screenshots/test_tui_basics.test-rakiura.html
new file mode 100644
index 00000000000..7f80031804b
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_tui_basics.test-rakiura.html
@@ -0,0 +1,41 @@
+Cylc Tui workflows filtered (W - edit, E - reset)
+
+- ~cylc
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+quit: q help: h context: enter tree: - ← + → navigation: ↑ ↓ ↥ ↧ Home End
+filter tasks: T f s r R filter workflows: W E p
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_workflow_states.filter-active.html b/tests/integration/tui/screenshots/test_workflow_states.filter-active.html
new file mode 100644
index 00000000000..282f76735ed
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_workflow_states.filter-active.html
@@ -0,0 +1,16 @@
+Cylc Tui workflows filtered (W - edit, E - reset)
+
+- ~cylc
+ + one - stopping
+ + two - paused
+
+
+
+
+
+
+
+
+quit: q help: h context: enter tree: - ← + → navigation: ↑ ↓ ↥ ↧ Home End
+filter tasks: T f s r R filter workflows: W E p
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_workflow_states.filter-starts-with-t.html b/tests/integration/tui/screenshots/test_workflow_states.filter-starts-with-t.html
new file mode 100644
index 00000000000..8c26ce6ccc9
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_workflow_states.filter-starts-with-t.html
@@ -0,0 +1,16 @@
+Cylc Tui workflows filtered (W - edit, E - reset)
+
+- ~cylc
+ + tre - stopped
+ + two - paused
+
+
+
+
+
+
+
+
+quit: q help: h context: enter tree: - ← + → navigation: ↑ ↓ ↥ ↧ Home End
+filter tasks: T f s r R filter workflows: W E p
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_workflow_states.filter-stopped-or-paused.html b/tests/integration/tui/screenshots/test_workflow_states.filter-stopped-or-paused.html
new file mode 100644
index 00000000000..8c26ce6ccc9
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_workflow_states.filter-stopped-or-paused.html
@@ -0,0 +1,16 @@
+Cylc Tui workflows filtered (W - edit, E - reset)
+
+- ~cylc
+ + tre - stopped
+ + two - paused
+
+
+
+
+
+
+
+
+quit: q help: h context: enter tree: - ← + → navigation: ↑ ↓ ↥ ↧ Home End
+filter tasks: T f s r R filter workflows: W E p
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_workflow_states.filter-stopped.html b/tests/integration/tui/screenshots/test_workflow_states.filter-stopped.html
new file mode 100644
index 00000000000..1ff602df101
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_workflow_states.filter-stopped.html
@@ -0,0 +1,16 @@
+Cylc Tui workflows filtered (W - edit, E - reset)
+
+- ~cylc
+ + tre - stopped
+
+
+
+
+
+
+
+
+
+quit: q help: h context: enter tree: - ← + → navigation: ↑ ↓ ↥ ↧ Home End
+filter tasks: T f s r R filter workflows: W E p
+
\ No newline at end of file
diff --git a/tests/integration/tui/screenshots/test_workflow_states.unfiltered.html b/tests/integration/tui/screenshots/test_workflow_states.unfiltered.html
new file mode 100644
index 00000000000..0651eedec30
--- /dev/null
+++ b/tests/integration/tui/screenshots/test_workflow_states.unfiltered.html
@@ -0,0 +1,16 @@
+Cylc Tui workflows filtered (W - edit, E - reset)
+
+- ~cylc
+ + one - stopping
+ + tre - stopped
+ + two - paused
+
+
+
+
+
+
+
+quit: q help: h context: enter tree: - ← + → navigation: ↑ ↓ ↥ ↧ Home End
+filter tasks: T f s r R filter workflows: W E p
+
\ No newline at end of file
diff --git a/tests/integration/tui/test_app.py b/tests/integration/tui/test_app.py
new file mode 100644
index 00000000000..908866fc948
--- /dev/null
+++ b/tests/integration/tui/test_app.py
@@ -0,0 +1,388 @@
+#!/usr/bin/env python3
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import pytest
+import urwid
+
+from cylc.flow.cycling.integer import IntegerPoint
+from cylc.flow.task_state import (
+# TASK_STATUS_RUNNING,
+ TASK_STATUS_SUCCEEDED,
+# TASK_STATUS_FAILED,
+# TASK_STATUS_WAITING,
+)
+from cylc.flow.workflow_status import StopMode
+
+
+def set_task_state(schd, task_states):
+ """Force tasks into the desired states.
+
+ Task states should be of the format (cycle, task, state, is_held).
+ """
+ for cycle, task, state, is_held in task_states:
+ itask = schd.pool.get_task(cycle, task)
+ if not itask:
+ itask = schd.pool.spawn_task(task, cycle, {1})
+ itask.state_reset(state, is_held=is_held)
+ schd.data_store_mgr.delta_task_state(itask)
+ schd.data_store_mgr.increment_graph_window(
+ itask.tokens,
+ cycle,
+ {1},
+ )
+
+
+async def test_tui_basics(rakiura):
+ """Test basic Tui interaction with no workflows."""
+ with rakiura(size='80,40') as rk:
+ # the app should open
+ rk.compare_screenshot('test-rakiura', 'the app should have loaded')
+
+ # "h" should bring up the onscreen help
+ rk.user_input('h')
+ rk.compare_screenshot(
+ 'test-rakiura-help',
+ 'the help screen should be visible'
+ )
+
+ # "q" should close the popup
+ rk.user_input('q')
+ rk.compare_screenshot(
+ 'test-rakiura',
+ 'the help screen should have closed',
+ )
+
+ # "enter" should bring up the context menu
+ rk.user_input('enter')
+ rk.compare_screenshot(
+ 'test-rakiura-enter',
+ 'the context menu should have opened',
+ )
+
+ # "enter" again should close it via the "cancel" button
+ rk.user_input('enter')
+ rk.compare_screenshot(
+ 'test-rakiura',
+ 'the context menu should have closed',
+ )
+
+ # "ctrl d" should exit Tui
+ with pytest.raises(urwid.ExitMainLoop):
+ rk.user_input('ctrl d')
+
+ # "q" should exit Tui
+ with pytest.raises(urwid.ExitMainLoop):
+ rk.user_input('q')
+
+
+async def test_subscribe_unsubscribe(one_conf, flow, scheduler, start, rakiura):
+ """Test a simple workflow with one task."""
+ id_ = flow(one_conf, name='one')
+ schd = scheduler(id_)
+ async with start(schd):
+ await schd.update_data_structure()
+ with rakiura(size='80,15') as rk:
+ rk.compare_screenshot(
+ 'unsubscribed',
+ 'the workflow should be collapsed'
+ ' (no subscription no state totals)',
+ )
+
+ # expand the workflow to subscribe to it
+ rk.user_input('down', 'right')
+ rk.wait_until_loaded()
+ rk.compare_screenshot(
+ 'subscribed',
+ 'the workflow should be expanded',
+ )
+
+ # collapse the workflow to unsubscribe from it
+ rk.user_input('left', 'up')
+ rk.force_update()
+ rk.compare_screenshot(
+ 'unsubscribed',
+ 'the workflow should be collapsed'
+ ' (no subscription no state totals)',
+ )
+
+
+async def test_workflow_states(one_conf, flow, scheduler, start, rakiura):
+ """Test viewing multiple workflows in different states."""
+ # one => stopping
+ id_1 = flow(one_conf, name='one')
+ schd_1 = scheduler(id_1)
+ # two => paused
+ id_2 = flow(one_conf, name='two')
+ schd_2 = scheduler(id_2)
+ # tre => stopped
+ flow(one_conf, name='tre')
+
+ async with start(schd_1):
+ schd_1.stop_mode = StopMode.AUTO # make it look like we're stopping
+ await schd_1.update_data_structure()
+
+ async with start(schd_2):
+ await schd_2.update_data_structure()
+ with rakiura(size='80,15') as rk:
+ rk.compare_screenshot(
+ 'unfiltered',
+ 'All workflows should be visible (one, two, tree)',
+ )
+
+ # filter for active workflows (i.e. paused, running, stopping)
+ rk.user_input('p')
+ rk.compare_screenshot(
+ 'filter-active',
+ 'Only active workflows should be visible (one, two)'
+ )
+
+ # invert the filter so we are filtering for stopped workflows
+ rk.user_input('W', 'enter', 'q')
+ rk.compare_screenshot(
+ 'filter-stopped',
+ 'Only stopped workflow should be visible (tre)'
+ )
+
+ # filter in paused workflows
+ rk.user_input('W', 'down', 'enter', 'q')
+ rk.force_update()
+ rk.compare_screenshot(
+ 'filter-stopped-or-paused',
+ 'Only stopped or paused workflows should be visible'
+ ' (two, tre)',
+ )
+
+ # reset the state filters
+ rk.user_input('W', 'down', 'down', 'enter', 'down', 'enter')
+
+ # scroll to the id filter text box
+ rk.user_input('down', 'down', 'down', 'down')
+
+ # scroll to the end of the ID
+ rk.user_input(*['right'] * (
+ len(schd_1.tokens['workflow'].rsplit('/', 1)[0]) + 1)
+ )
+
+ # type the letter "t"
+ # (this should filter for workflows starting with "t")
+ rk.user_input('t')
+ rk.force_update() # this is required for the tests
+ rk.user_input('page up', 'q') # close the dialogue
+
+ rk.compare_screenshot(
+ 'filter-starts-with-t',
+ 'Only workflows starting with the letter "t" should be'
+ ' visible (two, tre)',
+ )
+
+
+# TODO: Task state filtering is currently broken
+# see: https://github.com/cylc/cylc-flow/issues/5716
+#
+# async def test_task_states(flow, scheduler, start, rakiura):
+# id_ = flow({
+# 'scheduler': {
+# 'allow implicit tasks': 'true',
+# },
+# 'scheduling': {
+# 'initial cycle point': '1',
+# 'cycling mode': 'integer',
+# 'runahead limit': 'P1',
+# 'graph': {
+# 'P1': '''
+# a => b => c
+# b[-P1] => b
+# '''
+# }
+# }
+# }, name='test_task_states')
+# schd = scheduler(id_)
+# async with start(schd):
+# set_task_state(
+# schd,
+# [
+# (IntegerPoint('1'), 'a', TASK_STATUS_SUCCEEDED, False),
+# # (IntegerPoint('1'), 'b', TASK_STATUS_FAILED, False),
+# (IntegerPoint('1'), 'c', TASK_STATUS_RUNNING, False),
+# # (IntegerPoint('2'), 'a', TASK_STATUS_RUNNING, False),
+# (IntegerPoint('2'), 'b', TASK_STATUS_WAITING, True),
+# ]
+# )
+# await schd.update_data_structure()
+#
+# with rakiura(schd.tokens.id, size='80,20') as rk:
+# rk.compare_screenshot('unfiltered')
+#
+# # filter out waiting tasks
+# rk.user_input('T', 'down', 'enter', 'q')
+# rk.compare_screenshot('filter-not-waiting')
+
+
+async def test_navigation(flow, scheduler, start, rakiura):
+ """Test navigating with the arrow keys."""
+ id_ = flow({
+ 'scheduling': {
+ 'graph': {
+ 'R1': 'A & B1 & B2',
+ }
+ },
+ 'runtime': {
+ 'A': {},
+ 'B': {},
+ 'B1': {'inherit': 'B'},
+ 'B2': {'inherit': 'B'},
+ 'a1': {'inherit': 'A'},
+ 'a2': {'inherit': 'A'},
+ 'b11': {'inherit': 'B1'},
+ 'b12': {'inherit': 'B1'},
+ 'b21': {'inherit': 'B2'},
+ 'b22': {'inherit': 'B2'},
+ }
+ }, name='one')
+ schd = scheduler(id_)
+ async with start(schd):
+ await schd.update_data_structure()
+
+ with rakiura(size='80,30') as rk:
+ # wait for the workflow to appear (collapsed)
+ rk.wait_until_loaded('#spring')
+
+ rk.compare_screenshot(
+ 'on-load',
+ 'the workflow should be collapsed when Tui is loaded',
+ )
+
+ # pressing "right" should connect to the workflow
+ # and expand it once the data arrives
+ rk.user_input('down', 'right')
+ rk.wait_until_loaded(schd.tokens.id)
+ rk.compare_screenshot(
+ 'workflow-expanded',
+ 'the workflow should be expanded',
+ )
+
+ # pressing "left" should collapse the node
+ rk.user_input('down', 'down', 'left')
+ rk.compare_screenshot(
+ 'family-A-collapsed',
+ 'the family "1/A" should be collapsed',
+ )
+
+ # the "page up" and "page down" buttons should navigate to the top
+ # and bottom of the screen
+ rk.user_input('page down')
+ rk.compare_screenshot(
+ 'cursor-at-bottom-of-screen',
+ 'the cursor should be at the bottom of the screen',
+ )
+
+
+async def test_auto_expansion(flow, scheduler, start, rakiura):
+ """It should automatically expand cycles and top-level families.
+
+ When a workflow is expanded, Tui should auto expand cycles and top-level
+ families. Any new cycles and top-level families should be auto-expanded
+ when added.
+ """
+ id_ = flow({
+ 'scheduling': {
+ 'runahead limit': 'P1',
+ 'initial cycle point': '1',
+ 'cycling mode': 'integer',
+ 'graph': {
+ 'P1': 'b[-P1] => a => b'
+ },
+ },
+ 'runtime': {
+ 'A': {},
+ 'a': {'inherit': 'A'},
+ 'b': {},
+ },
+ }, name='one')
+ schd = scheduler(id_)
+ with rakiura(size='80,20') as rk:
+ async with start(schd):
+ await schd.update_data_structure()
+ # wait for the workflow to appear (collapsed)
+ rk.wait_until_loaded('#spring')
+
+ # open the workflow
+ rk.force_update()
+ rk.user_input('down', 'right')
+ rk.wait_until_loaded(schd.tokens.id)
+
+ rk.compare_screenshot(
+ 'on-load',
+ 'cycle "1" and top-level family "1/A" should be expanded',
+ )
+
+ for task in ('a', 'b'):
+ itask = schd.pool.get_task(IntegerPoint('1'), task)
+ itask.state_reset(TASK_STATUS_SUCCEEDED)
+ schd.pool.spawn_on_output(itask, TASK_STATUS_SUCCEEDED)
+ await schd.update_data_structure()
+
+ rk.compare_screenshot(
+ 'later-time',
+ 'cycle "2" and top-level family "2/A" should be expanded',
+ )
+
+
+async def test_restart_reconnect(one_conf, flow, scheduler, start, rakiura):
+ """It should handle workflow shutdown and restart.
+
+ The Cylc client can raise exceptions e.g. WorkflowStopped. Any text written
+ to stdout/err will mess with Tui. The purpose of this test is to ensure Tui
+can handle shutdown / restart without any errors occurring and any spurious
+ text appearing on the screen.
+ """
+ with rakiura(size='80,20') as rk:
+ schd = scheduler(flow(one_conf, name='one'))
+
+ # 1- start the workflow
+ async with start(schd):
+ await schd.update_data_structure()
+ # wait for the workflow to appear (collapsed)
+ rk.wait_until_loaded('#spring')
+
+ # expand the workflow (subscribes to updates from it)
+ rk.force_update()
+ rk.user_input('down', 'right')
+
+ # wait for workflow to appear (expanded)
+ rk.wait_until_loaded(schd.tokens.id)
+ rk.compare_screenshot(
+ '1-workflow-running',
+ 'the workflow should appear in tui and be expanded',
+ )
+
+ # 2 - stop the workflow
+ rk.compare_screenshot(
+ '2-workflow-stopped',
+ 'the stopped workflow should be collapsed with a message saying'
+ ' workflow stopped',
+ )
+
+ # 3- restart the workflow
+ schd = scheduler(flow(one_conf, name='one'))
+ async with start(schd):
+ await schd.update_data_structure()
+ rk.wait_until_loaded(schd.tokens.id)
+ rk.compare_screenshot(
+ '3-workflow-restarted',
+ 'the restarted workflow should be expanded',
+ )
diff --git a/tests/integration/tui/test_logs.py b/tests/integration/tui/test_logs.py
new file mode 100644
index 00000000000..7c63ad39fcc
--- /dev/null
+++ b/tests/integration/tui/test_logs.py
@@ -0,0 +1,363 @@
+#!/usr/bin/env python3
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import asyncio
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+from cylc.flow.cycling.integer import IntegerPoint
+from cylc.flow.exceptions import ClientError
+from cylc.flow.task_job_logs import get_task_job_log
+from cylc.flow.task_state import (
+ TASK_STATUS_FAILED,
+ TASK_STATUS_SUCCEEDED,
+)
+from cylc.flow.tui.data import _get_log
+
+import pytest
+
+if TYPE_CHECKING:
+ from cylc.flow.id import Tokens
+
+
+def get_job_log(tokens: 'Tokens', suffix: str) -> Path:
+ """Return the path to a job log file.
+
+ Args:
+ tokens: Job tokens.
+ suffix: Filename.
+
+ """
+ return Path(get_task_job_log(
+ tokens['workflow'],
+ tokens['cycle'],
+ tokens['task'],
+ tokens['job'],
+ suffix=suffix,
+ ))
+
+
+@pytest.fixture(scope='module')
+def standarise_host_and_path(mod_monkeypatch):
+ """Replace variable content in the log view.
+
+ The log view displays the "Host" and "Path" of the log file. These will
+ differ from user to user, so we mock away the difference to produce
+ stable results.
+ """
+ def _parse_log_header(contents):
+ _header, text = contents.split('\n', 1)
+ return 'myhost', 'mypath', text
+
+ mod_monkeypatch.setattr(
+ 'cylc.flow.tui.data._parse_log_header',
+ _parse_log_header,
+ )
+
+
+@pytest.fixture
+def wait_log_loaded(monkeypatch):
+ """Wait for Tui to successfully open a log file."""
+ # previous log open count
+ before = 0
+ # live log open count
+ count = 0
+
+ # wrap the Tui "_get_log" method to count the number of times it has
+ # returned
+ def __get_log(*args, **kwargs):
+ nonlocal count
+ try:
+ ret = _get_log(*args, **kwargs)
+ except ClientError as exc:
+ count += 1
+ raise exc
+ count += 1
+ return ret
+ monkeypatch.setattr(
+ 'cylc.flow.tui.data._get_log',
+ __get_log,
+ )
+
+ async def _wait_log_loaded(tries: int = 25, delay: float = 0.1):
+ """Wait for the log file to be loaded.
+
+ Args:
+ tries: The number of (re)tries to attempt before failing.
+ delay: The delay between retries.
+
+ """
+ nonlocal before, count
+ for _try in range(tries):
+ if count > before:
+ await asyncio.sleep(0)
+ before += 1
+ return
+ await asyncio.sleep(delay)
+ raise Exception(f'Log file was not loaded within {delay * tries}s')
+
+ return _wait_log_loaded
+
+
+@pytest.fixture(scope='module')
+async def workflow(mod_flow, mod_scheduler, mod_start, standarise_host_and_path):
+ """Test fixture providing a workflow with some log files to poke at."""
+ id_ = mod_flow({
+ 'scheduling': {
+ 'graph': {
+ 'R1': 'a',
+ }
+ },
+ 'runtime': {
+ 'a': {},
+ }
+ }, name='one')
+ schd = mod_scheduler(id_)
+ async with mod_start(schd):
+ # create some log files for tests to inspect
+
+ # create a scheduler log
+ # (note the scheduler log doesn't get created in integration tests)
+ scheduler_log = Path(schd.workflow_log_dir, '01-start-01.log')
+ with open(scheduler_log, 'w+') as logfile:
+ logfile.write('this is the\nscheduler log file')
+
+ # task 1/a
+ itask = schd.pool.get_task(IntegerPoint('1'), 'a')
+ itask.submit_num = 2
+
+ # mark 1/a/01 as failed
+ job_1 = schd.tokens.duplicate(cycle='1', task='a', job='01')
+ schd.data_store_mgr.insert_job(
+ 'a',
+ IntegerPoint('1'),
+ TASK_STATUS_SUCCEEDED,
+ {'submit_num': 1, 'platform': {'name': 'x'}}
+ )
+ schd.data_store_mgr.delta_job_state(job_1, TASK_STATUS_FAILED)
+
+ # mark 1/a/02 as succeeded
+ job_2 = schd.tokens.duplicate(cycle='1', task='a', job='02')
+ schd.data_store_mgr.insert_job(
+ 'a',
+ IntegerPoint('1'),
+ TASK_STATUS_SUCCEEDED,
+ {'submit_num': 2, 'platform': {'name': 'x'}}
+ )
+ schd.data_store_mgr.delta_job_state(job_1, TASK_STATUS_SUCCEEDED)
+ schd.data_store_mgr.delta_task_state(itask)
+
+ # mark 1/a as succeeded
+ itask.state_reset(TASK_STATUS_SUCCEEDED)
+ schd.data_store_mgr.delta_task_state(itask)
+
+ # 1/a/01 - job.out
+ job_1_out = get_job_log(job_1, 'job.out')
+ job_1_out.parent.mkdir(parents=True)
+ with open(job_1_out, 'w+') as log:
+ log.write(f'job: {job_1.relative_id}\nthis is a job log\n')
+
+ # 1/a/02 - job.out
+ job_2_out = get_job_log(job_2, 'job.out')
+ job_2_out.parent.mkdir(parents=True)
+ with open(job_2_out, 'w+') as log:
+ log.write(f'job: {job_2.relative_id}\nthis is a job log\n')
+
+ # 1/a/02 - job.err
+ job_2_err = get_job_log(job_2, 'job.err')
+ with open(job_2_err, 'w+') as log:
+ log.write(f'job: {job_2.relative_id}\nthis is a job error\n')
+
+ # 1/a/NN -> 1/a/02
+ (job_2_out.parent.parent / 'NN').symlink_to(
+ (job_2_out.parent.parent / '02'),
+ target_is_directory=True,
+ )
+
+ # populate the data store
+ await schd.update_data_structure()
+
+ yield schd
+
+
+async def test_scheduler_logs(
+ workflow,
+ mod_rakiura,
+ wait_log_loaded,
+):
+ """Test viewing the scheduler log files."""
+ with mod_rakiura(size='80,30') as rk:
+ # wait for the workflow to appear (collapsed)
+ rk.wait_until_loaded('#spring')
+
+ # open the workflow in Tui
+ rk.user_input('down', 'right')
+ rk.wait_until_loaded(workflow.tokens.id)
+
+ # open the log view for the workflow
+ rk.user_input('enter')
+ rk.user_input('down', 'down', 'enter')
+
+ # wait for the default log file to load
+ await wait_log_loaded()
+ rk.compare_screenshot(
+ 'scheduler-log-file',
+ 'the scheduler log file should be open',
+ )
+
+ # open the list of log files
+ rk.user_input('enter')
+ rk.compare_screenshot(
+ 'log-file-selection',
+ 'the list of available log files should be displayed'
+ )
+
+ # select the processed workflow configuration file
+ rk.user_input('down', 'enter')
+
+ # wait for the file to load
+ await wait_log_loaded()
+ rk.compare_screenshot(
+ 'workflow-configuration-file',
+ 'the workflow configuration file should be open'
+ )
+
+
+async def test_task_logs(
+ workflow,
+ mod_rakiura,
+ wait_log_loaded,
+):
+ """Test viewing task log files.
+
+ I.E. Test viewing job log files by opening the log view on a task.
+ """
+ with mod_rakiura(size='80,30') as rk:
+ # wait for the workflow to appear (collapsed)
+ rk.wait_until_loaded('#spring')
+
+ # open the workflow in Tui
+ rk.user_input('down', 'right')
+ rk.wait_until_loaded(workflow.tokens.id)
+
+ # open the context menu for the task 1/a
+ rk.user_input('down', 'down', 'enter')
+
+ # open the log view for the task 1/a
+ rk.user_input('down', 'down', 'down', 'enter')
+
+ # wait for the default log file to load
+ await wait_log_loaded()
+ rk.compare_screenshot(
+ 'latest-job.out',
+ 'the job.out file for the second job should be open',
+ )
+
+ rk.user_input('enter')
+ rk.user_input('enter')
+
+ # wait for the job.err file to load
+ await wait_log_loaded()
+ rk.compare_screenshot(
+ 'latest-job.err',
+ 'the job.out file for the second job should be open',
+ )
+
+
+async def test_job_logs(
+ workflow,
+ mod_rakiura,
+ wait_log_loaded,
+):
+ """Test viewing the job log files.
+
+ I.E. Test viewing job log files by opening the log view on a job.
+ """
+ with mod_rakiura(size='80,30') as rk:
+ # wait for the workflow to appear (collapsed)
+ rk.wait_until_loaded('#spring')
+
+ # open the workflow in Tui
+ rk.user_input('down', 'right')
+ rk.wait_until_loaded(workflow.tokens.id)
+
+ # open the context menu for the job 1/a/02
+ rk.user_input('down', 'down', 'right', 'down', 'enter')
+
+ # open the log view for the job 1/a/02
+ rk.user_input('down', 'down', 'down', 'enter')
+
+ # wait for the default log file to load
+ await wait_log_loaded()
+ rk.compare_screenshot(
+ '02-job.out',
+ 'the job.out file for the *second* job should be open',
+ )
+
+ # close log view
+ rk.user_input('q')
+
+ # open the log view for the job 1/a/01
+ rk.user_input('down', 'enter')
+ rk.user_input('down', 'down', 'down', 'enter')
+
+ # wait for the default log file to load
+ await wait_log_loaded()
+ rk.compare_screenshot(
+ '01-job.out',
+ 'the job.out file for the *first* job should be open',
+ )
+
+
+async def test_errors(
+ workflow,
+ mod_rakiura,
+ wait_log_loaded,
+ monkeypatch,
+):
+ """Test error handing of cat-log commands."""
+ # make it look like cat-log commands are failing
+ def cli_cmd_fail(*args, **kwargs):
+ raise ClientError('Something went wrong :(')
+
+ monkeypatch.setattr(
+ 'cylc.flow.tui.data.cli_cmd',
+ cli_cmd_fail,
+ )
+
+ with mod_rakiura(size='80,30') as rk:
+ # wait for the workflow to appear (collapsed)
+ rk.wait_until_loaded('#spring')
+
+ # open the log view on scheduler
+ rk.user_input('down', 'enter', 'down', 'down', 'enter')
+
+ # it will fail to open
+ await wait_log_loaded()
+ rk.compare_screenshot(
+ 'open-error',
+ 'the error message should be displayed in the log view header',
+ )
+
+ # open the file selector
+ rk.user_input('enter')
+
+ # it will fail to list available log files
+ rk.compare_screenshot(
+ 'list-error',
+ 'the error message should be displayed in a pop up',
+ )
diff --git a/tests/integration/tui/test_mutations.py b/tests/integration/tui/test_mutations.py
new file mode 100644
index 00000000000..18da88d227d
--- /dev/null
+++ b/tests/integration/tui/test_mutations.py
@@ -0,0 +1,216 @@
+#!/usr/bin/env python3
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import asyncio
+
+import pytest
+
+from cylc.flow.exceptions import ClientError
+
+
+async def gen_commands(schd):
+ """Yield commands from the scheduler's command queue."""
+ while True:
+ await asyncio.sleep(0.1)
+ if not schd.command_queue.empty():
+ yield schd.command_queue.get()
+
+
+async def test_online_mutation(
+ one_conf,
+ flow,
+ scheduler,
+ start,
+ rakiura,
+ monkeypatch,
+):
+ """Test a simple workflow with one task."""
+ id_ = flow(one_conf, name='one')
+ schd = scheduler(id_)
+ with rakiura(size='80,15') as rk:
+ async with start(schd):
+ await schd.update_data_structure()
+ assert schd.command_queue.empty()
+
+ # open the workflow
+ rk.force_update()
+ rk.user_input('down', 'right')
+ rk.wait_until_loaded(schd.tokens.id)
+
+ # focus on a task
+ rk.user_input('down', 'right', 'down', 'right')
+ rk.compare_screenshot(
+ # take a screenshot to ensure we have focused on the task
+ # successfully
+ 'task-selected',
+ 'the cursor should be on the task 1/foo',
+ )
+
+ # focus on the hold mutation for a task
+ rk.user_input('enter', 'down')
+ rk.compare_screenshot(
+ # take a screenshot to ensure we have focused on the mutation
+ # successfully
+ 'hold-mutation-selected',
+ 'the cursor should be on the "hold" mutation',
+ )
+
+ # run the hold mutation
+ rk.user_input('enter')
+
+ # the mutation should be in the scheduler's command_queue
+ command = None
+ async for command in gen_commands(schd):
+ break
+ assert command == ('hold', (['1/one'],), {})
+
+ # close the dialogue and re-run the hold mutation
+ rk.user_input('q', 'q', 'enter')
+ rk.compare_screenshot(
+ 'command-failed-workflow-stopped',
+ 'an error should be visible explaining that the operation'
+ ' cannot be performed on a stopped workflow',
+ # NOTE: don't update so Tui still thinks the workflow is running
+ force_update=False,
+ )
+
+ # force mutations to raise ClientError
+ def _get_client(*args, **kwargs):
+ raise ClientError('mock error')
+ monkeypatch.setattr(
+ 'cylc.flow.tui.data.get_client',
+ _get_client,
+ )
+
+ # close the dialogue and re-run the hold mutation
+ rk.user_input('q', 'q', 'enter')
+ rk.compare_screenshot(
+ 'command-failed-client-error',
+ 'an error should be visible explaining that the operation'
+ ' failed due to a client error',
+ # NOTE: don't update so Tui still thinks the workflow is running
+ force_update=False,
+ )
+
+
+@pytest.fixture
+def standardise_cli_cmds(monkeypatch):
+ """This removes the variable bit of the workflow ID from CLI commands.
+
+ The workflow ID changes from run to run. In order to make screenshots
+ stable, this fixture strips the run-specific prefix from workflow IDs.
+ """
+ from cylc.flow.tui.data import extract_context
+ def _extract_context(selection):
+ context = extract_context(selection)
+ if 'workflow' in context:
+ context['workflow'] = [
+ workflow.rsplit('/', 1)[-1]
+ for workflow in context.get('workflow', [])
+ ]
+ return context
+ monkeypatch.setattr(
+ 'cylc.flow.tui.data.extract_context',
+ _extract_context,
+ )
+
+@pytest.fixture
+def capture_commands(monkeypatch):
+ ret = []
+ returncode = [0]
+
+ class _Popen:
+ def __init__(self, *args, **kwargs):
+ nonlocal ret
+ ret.append(args)
+
+ def communicate(self):
+ return 'mock-stdout', 'mock-stderr'
+
+ @property
+ def returncode(self):
+ nonlocal returncode
+ return returncode[0]
+
+ monkeypatch.setattr(
+ 'cylc.flow.tui.data.Popen',
+ _Popen,
+ )
+
+ return ret, returncode
+
+
+async def test_offline_mutation(
+ one_conf,
+ flow,
+ rakiura,
+ capture_commands,
+ standardise_cli_cmds,
+):
+ id_ = flow(one_conf, name='one')
+ commands, returncode = capture_commands
+
+ with rakiura(size='80,15') as rk:
+ # run the stop-all mutation
+ rk.wait_until_loaded('root')
+ rk.user_input('enter', 'down')
+ rk.compare_screenshot(
+ # take a screenshot to ensure we have focused on the task
+ # successfully
+ 'stop-all-mutation-selected',
+ 'the stop-all mutation should be selected',
+ )
+ rk.user_input('enter')
+
+ # the command "cylc stop '*'" should have been run
+ assert commands == [(['cylc', 'stop', '*'],)]
+ commands.clear()
+
+ # run the clean command on the workflow
+ rk.user_input('down', 'enter', 'down')
+ rk.compare_screenshot(
+ # take a screenshot to ensure we have focused on the mutation
+ # successfully
+ 'clean-mutation-selected',
+ 'the clean mutation should be selected',
+ )
+ rk.user_input('enter')
+
+ # the command "cylc clean " should have been run
+ assert commands == [(['cylc', 'clean', '--yes', 'one'],)]
+ commands.clear()
+
+ # make commands fail
+ returncode[:] = [1]
+ rk.user_input('enter', 'down')
+ rk.compare_screenshot(
+ # take a screenshot to ensure we have focused on the mutation
+ # successfully
+ 'clean-mutation-selected',
+ 'the clean mutation should be selected',
+ )
+ rk.user_input('enter')
+
+ assert commands == [(['cylc', 'clean', '--yes', 'one'],)]
+
+ rk.compare_screenshot(
+ # take a screenshot to ensure we have focused on the mutation
+ # successfully
+ 'clean-command-error',
+ 'there should be a box displaying the error containing the stderr'
+ ' returned by the command',
+ )
diff --git a/tests/integration/tui/test_show.py b/tests/integration/tui/test_show.py
new file mode 100644
index 00000000000..c664cdd1393
--- /dev/null
+++ b/tests/integration/tui/test_show.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python3
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+from cylc.flow.exceptions import ClientError
+from cylc.flow.tui.data import _show
+
+
+async def test_show(flow, scheduler, start, rakiura, monkeypatch):
+ """Test "cylc show" support in Tui."""
+ id_ = flow({
+ 'scheduling': {
+ 'graph': {
+ 'R1': 'foo'
+ },
+ },
+ 'runtime': {
+ 'foo': {
+ 'meta': {
+ 'title': 'Foo',
+ 'description': 'The first metasyntactic variable.'
+ },
+ },
+ },
+ }, name='one')
+ schd = scheduler(id_)
+ async with start(schd):
+ await schd.update_data_structure()
+
+ with rakiura(size='80,40') as rk:
+ rk.user_input('down', 'right')
+ rk.wait_until_loaded(schd.tokens.id)
+
+ # select a task
+ rk.user_input('down', 'down', 'enter')
+
+ # select the "show" context option
+ rk.user_input(*(['down'] * 6), 'enter')
+ rk.compare_screenshot(
+ 'success',
+ 'the show output should be displayed',
+ )
+
+ # make it look like "cylc show" failed
+ def cli_cmd_fail(*args, **kwargs):
+ raise ClientError(':(')
+ monkeypatch.setattr(
+ 'cylc.flow.tui.data.cli_cmd',
+ cli_cmd_fail,
+ )
+
+ # select the "show" context option
+ rk.user_input('q', 'enter', *(['down'] * 6), 'enter')
+ rk.compare_screenshot(
+ 'fail',
+ 'the error should be displayed',
+ )
diff --git a/tests/integration/tui/test_updater.py b/tests/integration/tui/test_updater.py
new file mode 100644
index 00000000000..2d9927ca5eb
--- /dev/null
+++ b/tests/integration/tui/test_updater.py
@@ -0,0 +1,234 @@
+#!/usr/bin/env python3
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+from copy import deepcopy
+from pathlib import Path
+from queue import Queue
+import re
+
+from async_timeout import timeout
+import pytest
+
+from cylc.flow.cycling.integer import IntegerPoint
+from cylc.flow.id import Tokens
+from cylc.flow.tui.updater import (
+ Updater,
+ get_default_filters,
+)
+from cylc.flow.workflow_status import WorkflowStatus
+
+
+@pytest.fixture
+def updater(monkeypatch, test_dir):
+ """Return an updater ready for testing."""
+ # patch the update intervals so that everything runs for every update
+ monkeypatch.setattr(
+ 'cylc.flow.tui.updater.Updater.BASE_UPDATE_INTERVAL',
+ 0,
+ )
+ monkeypatch.setattr(
+ 'cylc.flow.tui.updater.Updater.BASE_SCAN_INTERVAL',
+ 0,
+ )
+
+ # create the updater
+ updater = Updater()
+
+ # swap multiprocessing.Queue for queue.Queue
+ # (this means queued operations are instant making tests more stable)
+ updater.update_queue = Queue()
+ updater._command_queue = Queue()
+
+ # set up the filters
+ # (these filter for the workflows created in this test only)
+ filters = get_default_filters()
+ id_base = str(test_dir.relative_to(Path("~/cylc-run").expanduser()))
+ filters['workflows']['id'] = f'^{re.escape(id_base)}/.*'
+ updater._update_filters(filters)
+
+ return updater
+
+
+def get_child_tokens(root_node, types, relative=False):
+ """Return all ID of the specified types contained within the provided tree.
+
+ Args:
+ root_node:
+ The Tui tree you want to look for IDs in.
+ types:
+ The Tui types (e.g. 'workflow' or 'task') you want to extract.
+ relative:
+ If True, the relative IDs will be returned.
+
+ """
+ ret = set()
+ stack = [root_node]
+ while stack:
+ node = stack.pop()
+ stack.extend(node['children'])
+ if node['type_'] in types:
+
+ tokens = Tokens(node['id_'])
+ if relative:
+ ret.add(tokens.relative_id)
+ else:
+ ret.add(tokens.id)
+ return ret
+
+
+async def test_subscribe(one_conf, flow, scheduler, run, updater):
+ """It should subscribe and unsubscribe from workflows."""
+ id_ = flow(one_conf)
+ schd = scheduler(id_)
+
+ async with run(schd):
+ # run the updater and the test
+ async with timeout(10):
+ # wait for the first update
+ root_node = await updater._update()
+
+            # there should be a root node
+ assert root_node['id_'] == 'root'
+ # a single root_node representing the workflow
+ assert root_node['children'][0]['id_'] == schd.tokens.id
+ # and a "spring" root_node used to active the subscription
+ # mechanism
+ assert root_node['children'][0]['children'][0]['id_'] == '#spring'
+
+ # subscribe to the workflow
+ updater.subscribe(schd.tokens.id)
+ root_node = await updater._update()
+
+ # check the workflow contains one cycle with one task in it
+ workflow_node = root_node['children'][0]
+ assert len(workflow_node['children']) == 1
+ cycle_node = workflow_node['children'][0]
+ assert Tokens(cycle_node['id_']).relative_id == '1' # cycle ID
+ assert len(cycle_node['children']) == 1
+ task_node = cycle_node['children'][0]
+ assert Tokens(task_node['id_']).relative_id == '1/one' # task ID
+
+ # unsubscribe from the workflow
+ updater.unsubscribe(schd.tokens.id)
+ root_node = await updater._update()
+
+ # the workflow should be replaced by a "spring" node again
+ assert root_node['children'][0]['children'][0]['id_'] == '#spring'
+
+
+async def test_filters(one_conf, flow, scheduler, run, updater):
+ """It should filter workflow and task states.
+
+ Note:
+ The workflow ID filter is not explicitly tested here, but it is
+ indirectly tested, otherwise other workflows would show up in the
+ updater results.
+
+ """
+ one = scheduler(flow({
+ 'scheduler': {
+ 'allow implicit tasks': 'True',
+ },
+ 'scheduling': {
+ 'graph': {
+ 'R1': 'a & b & c',
+ }
+ },
+ 'runtime': {
+ # TODO: remove this runtime section in
+ # https://github.com/cylc/cylc-flow/pull/5721
+ 'root': {
+ 'simulation': {
+ 'default run length': 'PT1M',
+ },
+ },
+ },
+ }, name='one'), paused_start=True)
+ two = scheduler(flow(one_conf, name='two'))
+ tre = scheduler(flow(one_conf, name='tre'))
+
+ # start workflow "one"
+ async with run(one):
+ # mark "1/a" as running and "1/b" as succeeded
+ one_a = one.pool.get_task(IntegerPoint('1'), 'a')
+ one_a.state_reset('running')
+ one.data_store_mgr.delta_task_state(one_a)
+ one.pool.get_task(IntegerPoint('1'), 'b').state_reset('succeeded')
+
+ # start workflow "two"
+ async with run(two):
+ # run the updater and the test
+ filters = deepcopy(updater.filters)
+
+ root_node = await updater._update()
+ assert {child['id_'] for child in root_node['children']} == {
+ one.tokens.id,
+ two.tokens.id,
+ tre.tokens.id,
+ }
+
+ # filter out paused workflows
+ filters = deepcopy(filters)
+ filters['workflows'][WorkflowStatus.STOPPED.value] = True
+ filters['workflows'][WorkflowStatus.PAUSED.value] = False
+ updater.update_filters(filters)
+
+ # "one" and "two" should now be filtered out
+ root_node = await updater._update()
+ assert {child['id_'] for child in root_node['children']} == {
+ tre.tokens.id,
+ }
+
+ # filter out stopped workflows
+ filters = deepcopy(filters)
+ filters['workflows'][WorkflowStatus.STOPPED.value] = False
+ filters['workflows'][WorkflowStatus.PAUSED.value] = True
+ updater.update_filters(filters)
+
+ # "tre" should now be filtered out
+ root_node = await updater._update()
+ assert {child['id_'] for child in root_node['children']} == {
+ one.tokens.id,
+ two.tokens.id,
+ }
+
+ # subscribe to "one"
+ updater._subscribe(one.tokens.id)
+ root_node = await updater._update()
+ assert get_child_tokens(
+ root_node, types={'task'}, relative=True
+ ) == {
+ '1/a',
+ '1/b',
+ '1/c',
+ }
+
+ # filter out running tasks
+ # TODO: see https://github.com/cylc/cylc-flow/issues/5716
+ # filters = deepcopy(filters)
+ # filters['tasks'][TASK_STATUS_RUNNING] = False
+ # updater.update_filters(filters)
+
+ # root_node = await updater._update()
+ # assert get_child_tokens(
+ # root_node,
+ # types={'task'},
+ # relative=True
+ # ) == {
+ # '1/b',
+ # '1/c',
+ # }
diff --git a/tests/integration/utils/flow_tools.py b/tests/integration/utils/flow_tools.py
index 927884c233d..c26f79c04ac 100644
--- a/tests/integration/utils/flow_tools.py
+++ b/tests/integration/utils/flow_tools.py
@@ -53,7 +53,7 @@ def _make_src_flow(src_path, conf):
def _make_flow(
cylc_run_dir: Union[Path, str],
test_dir: Path,
- conf: Union[dict, str],
+ conf: dict,
name: Optional[str] = None,
id_: Optional[str] = None,
) -> str:
@@ -65,12 +65,18 @@ def _make_flow(
name = str(uuid1())
flow_run_dir = (test_dir / name)
flow_run_dir.mkdir(parents=True, exist_ok=True)
- reg = str(flow_run_dir.relative_to(cylc_run_dir))
- if isinstance(conf, dict):
- conf = flow_config_str(conf)
+ id_ = str(flow_run_dir.relative_to(cylc_run_dir))
+ # set the default simulation runtime to zero (can be overridden)
+ (
+ conf.setdefault('runtime', {})
+ .setdefault('root', {})
+ .setdefault('simulation', {})
+ .setdefault('default run length', 'PT0S')
+ )
+ conf = flow_config_str(conf)
with open((flow_run_dir / WorkflowFiles.FLOW_FILE), 'w+') as flow_file:
flow_file.write(conf)
- return reg
+ return id_
@contextmanager
@@ -78,13 +84,17 @@ def _make_scheduler():
"""Return a scheduler object for a flow registration."""
schd: Scheduler = None # type: ignore[assignment]
- def __make_scheduler(reg: str, **opts: Any) -> Scheduler:
- # This allows paused_start to be overridden:
- opts = {'paused_start': True, **opts}
+ def __make_scheduler(id_: str, **opts: Any) -> Scheduler:
+ opts = {
+ # safe n sane defaults for integration tests
+ 'paused_start': True,
+ 'run_mode': 'simulation',
+ **opts,
+ }
options = RunOptions(**opts)
# create workflow
nonlocal schd
- schd = Scheduler(reg, options)
+ schd = Scheduler(id_, options)
return schd
yield __make_scheduler
diff --git a/tests/integration/utils/test_flow_tools.py b/tests/integration/utils/test_flow_tools.py
index e3ec926364b..04e292eb4ed 100644
--- a/tests/integration/utils/test_flow_tools.py
+++ b/tests/integration/utils/test_flow_tools.py
@@ -25,8 +25,8 @@
# test _make_flow via the conftest fixture
def test_flow(run_dir, flow, one_conf):
"""It should create a flow in the run directory."""
- reg = flow(one_conf)
- assert Path(run_dir / reg).exists()
- assert Path(run_dir / reg / 'flow.cylc').exists()
- with open(Path(run_dir / reg / 'flow.cylc'), 'r') as flow_file:
+ id_ = flow(one_conf)
+ assert Path(run_dir / id_).exists()
+ assert Path(run_dir / id_ / 'flow.cylc').exists()
+ with open(Path(run_dir / id_ / 'flow.cylc'), 'r') as flow_file:
assert 'scheduling' in flow_file.read()
diff --git a/tests/integration/validate/test_outputs.py b/tests/integration/validate/test_outputs.py
new file mode 100644
index 00000000000..a91393366b5
--- /dev/null
+++ b/tests/integration/validate/test_outputs.py
@@ -0,0 +1,165 @@
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+"""Test validation of the [runtime][][outputs] section."""
+
+from random import random
+
+import pytest
+
+from cylc.flow.exceptions import WorkflowConfigError
+from cylc.flow.unicode_rules import TaskOutputValidator, TaskMessageValidator
+
+
+@pytest.mark.parametrize(
+ 'outputs, valid',
+ [
+ pytest.param(
+ [
+ 'foo',
+ 'foo-bar',
+ 'foo_bar',
+ 'foo.bar',
+ '0foo0',
+ '123',
+ ],
+ True,
+ id='valid',
+ ),
+ pytest.param(
+ [
+ # special prefix
+ '_cylc',
+ # nasty chars
+ 'foo bar',
+ 'foo,bar',
+ 'foo/bar',
+ 'foo+bar',
+ # keywords
+ 'required',
+ 'all',
+ # built-in qualifiers
+ 'succeeded',
+ 'succeed-all',
+ # alternative qualifiers
+ 'succeed',
+ ],
+ False,
+ id='invalid',
+ ),
+ ],
+)
+def test_outputs(outputs, valid, flow, validate):
+ """It should validate task outputs.
+
+ Outputs i.e. the keys of the [outputs] section.
+
+ We don't want users adding outputs that override built-in
+ outputs (e.g. succeeded, failed) or qualifiers (e.g. succeed-all).
+
+ We don't want users adding outputs that conflict with keywords e.g.
+ "required" or "all".
+ """
+ # test that each output validates correctly
+ for output in outputs:
+ assert TaskOutputValidator.validate(output)[0] is valid
+
+ # test that output validation is actually being performed
+ id_ = flow({
+ 'scheduling': {
+ 'graph': {'R1': 'foo'}
+ },
+ 'runtime': {
+ 'foo': {
+ 'outputs': {
+ output: str(random())
+ for output in outputs
+ }
+ }
+ },
+ })
+ val = lambda: validate(id_)
+ if valid:
+ val()
+ else:
+ with pytest.raises(WorkflowConfigError):
+ val()
+
+
+@pytest.mark.parametrize(
+    'messages, valid',
+    [
+        pytest.param(
+            [
+                'foo bar baz',
+                'WARN:foo bar baz'
+            ],
+            True,
+            id='valid',
+        ),
+        pytest.param(
+            [
+                # special prefix
+                '_cylc',
+                # invalid colon usage
+                'foo bar: baz',
+                # built-in qualifiers
+                'succeeded',
+                'succeed-all',
+                # alternative qualifiers
+                'succeed',
+            ],
+            False,
+            id='invalid',
+        ),
+    ],
+)
+def test_messages(messages, valid, flow, validate):
+    """It should validate task messages.
+
+    Messages i.e. the values of the [outputs] section.
+
+    We don't want users adding messages that override built-in outputs (e.g.
+    succeeded, failed). To avoid confusion it's best to prohibit outputs which
+    override built-in qualifiers (e.g. succeed-all) too.
+
+    There's a special use of the colon character which users need to conform
+    with too.
+    """
+    # test that each message validates correctly
+    for message in messages:
+        assert TaskMessageValidator.validate(message)[0] is valid
+
+    # test that output validation is actually being performed
+    id_ = flow({
+        'scheduling': {
+            'graph': {'R1': 'foo'}
+        },
+        'runtime': {
+            'foo': {
+                'outputs': {
+                    str(random()): message
+                    for message in messages
+                }
+            }
+        },
+    })
+    val = lambda: validate(id_)
+    if valid:
+        val()
+    else:
+        with pytest.raises(WorkflowConfigError):
+            val()
diff --git a/tests/unit/cfgspec/test_globalcfg.py b/tests/unit/cfgspec/test_globalcfg.py
index 2becda1caad..6db8d76dcf8 100644
--- a/tests/unit/cfgspec/test_globalcfg.py
+++ b/tests/unit/cfgspec/test_globalcfg.py
@@ -148,3 +148,13 @@ def test_source_dir_validation(
assert "must be an absolute path" in str(excinfo.value)
else:
glblcfg.load()
+
+def test_platform_ssh_forward_variables(mock_global_config):
+
+ glblcfg: GlobalConfig = mock_global_config('''
+ [platforms]
+ [[foo]]
+ ssh forward environment variables = "FOO", "BAR"
+ ''')
+
+ assert glblcfg.get(['platforms','foo','ssh forward environment variables']) == ["FOO", "BAR"]
diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py
index 80b64ab604c..68095a5795e 100644
--- a/tests/unit/conftest.py
+++ b/tests/unit/conftest.py
@@ -70,7 +70,7 @@ def _tmp_run_dir(tmp_path: Path, monkeypatch: pytest.MonkeyPatch):
Adds the runN symlink automatically if the workflow ID ends with /run__.
Args:
- reg: Workflow name.
+ id_: Workflow name.
installed: If True, make it look like the workflow was installed
using cylc install (creates _cylc-install dir).
named: If True and installed is True, the _cylc-install dir will
@@ -83,7 +83,7 @@ def _tmp_run_dir(tmp_path: Path, monkeypatch: pytest.MonkeyPatch):
cylc_run_dir = tmp_run_dir()
"""
def __tmp_run_dir(
- reg: Optional[str] = None,
+ id_: Optional[str] = None,
installed: bool = False,
named: bool = False
) -> Path:
@@ -92,8 +92,8 @@ def __tmp_run_dir(
cylc_run_dir = tmp_path / 'cylc-run'
cylc_run_dir.mkdir(exist_ok=True)
monkeypatch.setattr('cylc.flow.pathutil._CYLC_RUN_DIR', cylc_run_dir)
- if reg:
- run_dir = cylc_run_dir.joinpath(reg)
+ if id_:
+ run_dir = cylc_run_dir.joinpath(id_)
run_dir.mkdir(parents=True, exist_ok=True)
(run_dir / WorkflowFiles.FLOW_FILE).touch(exist_ok=True)
(run_dir / WorkflowFiles.Service.DIRNAME).mkdir(exist_ok=True)
@@ -102,8 +102,8 @@ def __tmp_run_dir(
link_runN(run_dir)
if installed:
if named:
- if len(Path(reg).parts) < 2:
- raise ValueError("Named run requires two-level reg")
+ if len(Path(id_).parts) < 2:
+ raise ValueError("Named run requires two-level id_")
(run_dir.parent / WorkflowFiles.Install.DIRNAME).mkdir(
exist_ok=True)
else:
diff --git a/tests/unit/cycling/test_cycling.py b/tests/unit/cycling/test_cycling.py
index e21f6a3a501..ae90b68e62f 100644
--- a/tests/unit/cycling/test_cycling.py
+++ b/tests/unit/cycling/test_cycling.py
@@ -23,6 +23,21 @@
parse_exclusion,
)
+from cylc.flow.cycling.integer import (
+ IntegerPoint,
+ IntegerSequence,
+)
+
+from cylc.flow.cycling.iso8601 import (
+ ISO8601Point,
+ ISO8601Sequence,
+)
+
+from cylc.flow.cycling.loader import (
+ INTEGER_CYCLING_TYPE,
+ ISO8601_CYCLING_TYPE,
+)
+
def test_simple_abstract_class_test():
"""Cannot instantiate abstract classes, they must be defined in
@@ -73,3 +88,86 @@ def test_parse_bad_exclusion(expression):
"""Tests incorrectly formatted exclusions"""
with pytest.raises(Exception):
parse_exclusion(expression)
+
+
+@pytest.mark.parametrize(
+ 'sequence, wf_start_point, expected',
+ (
+ (
+ ('R/2/P2', 1),
+ None,
+ [2,4,6,8,10]
+ ),
+ (
+ ('R/2/P2', 1),
+ 3,
+ [4,6,8,10,12]
+ ),
+ ),
+)
+def test_get_first_n_points_integer(
+ set_cycling_type,
+ sequence, wf_start_point, expected
+):
+ """Test sequence get_first_n_points method.
+
+ (The method is implemented in the base class).
+ """
+ set_cycling_type(INTEGER_CYCLING_TYPE)
+ sequence = IntegerSequence(*sequence)
+ if wf_start_point is not None:
+ wf_start_point = IntegerPoint(wf_start_point)
+ expected = [
+ IntegerPoint(p)
+ for p in expected
+ ]
+ assert (
+ expected == (
+ sequence.get_first_n_points(
+ len(expected),
+ wf_start_point
+ )
+ )
+ )
+
+
+@pytest.mark.parametrize(
+ 'sequence, wf_start_point, expected',
+ (
+ (
+ ('R/2008/P2Y', '2001'),
+ None,
+ ['2008', '2010', '2012', '2014', '2016']
+ ),
+ (
+ ('R/2008/P2Y', '2001'),
+ '2009',
+ ['2010', '2012', '2014', '2016', '2018']
+ ),
+ ),
+)
+def test_get_first_n_points_iso8601(
+ set_cycling_type,
+ sequence, wf_start_point, expected
+):
+ """Test sequence get_first_n_points method.
+
+ (The method is implemented in the base class).
+ """
+ set_cycling_type(ISO8601_CYCLING_TYPE, 'Z')
+ sequence = ISO8601Sequence(*sequence)
+ if wf_start_point is not None:
+ wf_start_point = ISO8601Point(wf_start_point)
+ expected = [
+ ISO8601Point(p)
+ for p in expected
+ ]
+
+ assert (
+ expected == (
+ sequence.get_first_n_points(
+ len(expected),
+ wf_start_point
+ )
+ )
+ )
diff --git a/tests/unit/cycling/test_iso8601.py b/tests/unit/cycling/test_iso8601.py
index 8e95b809c1a..ae0eb957f47 100644
--- a/tests/unit/cycling/test_iso8601.py
+++ b/tests/unit/cycling/test_iso8601.py
@@ -14,9 +14,11 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-import pytest
from datetime import datetime
+import pytest
+from pytest import param
+
from cylc.flow.cycling.iso8601 import (
ISO8601Interval,
ISO8601Point,
@@ -584,6 +586,29 @@ def test_multiple_exclusions_extensive(set_cycling_type):
assert sequence.get_prev_point(point_4) == point_1
+def test_exclusion_zero_duration_warning(set_cycling_type, caplog, log_filter):
+ """It should not log zero-duration warnings for exclusion points.
+
+ Exclusions may either be sequences or points. We first attempt to parse
+ them as sequences, if this fails, we attempt to parse them as points.
+
+ The zero-duration recurrence warning would be logged if we attempted to
+ parse a point as a sequence. To avoid spurious warnings this should be
+ turned off for exclusion parsing.
+
+ """
+ # parsing a point as a sequences causes a zero-duration warning
+ set_cycling_type(ISO8601_CYCLING_TYPE, "+05")
+ with pytest.raises(Exception):
+ ISO8601Sequence('3000', '2999')
+ assert log_filter(caplog, contains='zero-duration')
+
+ # parsing a point in an exclusion should not
+ caplog.clear()
+ ISO8601Sequence('P1Y ! 3000', '2999')
+ assert not log_filter(caplog, contains='zero-duration')
+
+
def test_simple(set_cycling_type):
"""Run some simple tests for date-time cycling."""
set_cycling_type(ISO8601_CYCLING_TYPE, "Z")
@@ -648,74 +673,52 @@ def test_simple(set_cycling_type):
assert not sequence.is_on_sequence(ISO8601Point("20100809T0005"))
-def test_next_simple(set_cycling_type):
+@pytest.mark.parametrize(
+ 'value, expected', [
+ ('next(T2100Z)', '20100808T2100Z'),
+ ('next(T00)', '20100809T0000Z'),
+ ('next(T-15)', '20100808T1615Z'),
+ ('next(T-45)', '20100808T1545Z'),
+ ('next(-10)', '21100101T0000Z'),
+ ('next(-1008)', '21100801T0000Z'),
+ ('next(--10)', '20101001T0000Z'),
+ ('next(--0325)', '20110325T0000Z'),
+ ('next(---10)', '20100810T0000Z'),
+ ('next(---05T1200Z)', '20100905T1200Z'),
+ param('next(--08-08)', '20110808T0000Z', marks=pytest.mark.xfail),
+ ('next(T15)', '20100809T1500Z'),
+ ('next(T-41)', '20100808T1541Z'),
+ ]
+)
+def test_next_simple(value: str, expected: str, set_cycling_type):
"""Test the generation of CP using 'next' from single input."""
set_cycling_type(ISO8601_CYCLING_TYPE, "Z")
- my_now = "20100808T1540Z"
- sequence = (
- "next(T2100Z)", # 20100808T2100Z
- "next(T00)", # 20100809T0000Z
- "next(T-15)", # 20100808T1615Z
- "next(T-45)", # 20100808T1545Z
- "next(-10)", # 21100101T0000Z
- "next(-1008)", # 21100801T0000Z
- "next(--10)", # 20101001T0000Z
- "next(--0325)", # 20110325T0000Z
- "next(---10)", # 20100810T0000Z
- "next(---05T1200Z)", # 20100905T1200Z
- )
+ my_now = "2010-08-08T15:41Z"
+ assert ingest_time(value, my_now) == expected
- output = []
- for point in sequence:
- output.append(ingest_time(point, my_now))
- assert output == [
- "20100808T2100Z",
- "20100809T0000Z",
- "20100808T1615Z",
- "20100808T1545Z",
- "21100101T0000Z",
- "21100801T0000Z",
- "20101001T0000Z",
- "20110325T0000Z",
- "20100810T0000Z",
- "20100905T1200Z",
+@pytest.mark.parametrize(
+ 'value, expected', [
+ ('previous(T2100Z)', '20100807T2100Z'),
+ ('previous(T00)', '20100808T0000Z'),
+ ('previous(T-15)', '20100808T1515Z'),
+ ('previous(T-45)', '20100808T1445Z'),
+ ('previous(-10)', '20100101T0000Z'),
+ ('previous(-1008)', '20100801T0000Z'),
+ ('previous(--10)', '20091001T0000Z'),
+ ('previous(--0325)', '20100325T0000Z'),
+ ('previous(---10)', '20100710T0000Z'),
+ ('previous(---05T1200Z)', '20100805T1200Z'),
+ param('previous(--08-08)', '20100808T0000Z', marks=pytest.mark.xfail),
+ ('previous(T15)', '20100808T1500Z'),
+ ('previous(T-41)', '20100808T1441Z'),
]
-
-
-def test_previous_simple(set_cycling_type):
+)
+def test_previous_simple(value: str, expected: str, set_cycling_type):
"""Test the generation of CP using 'previous' from single input."""
set_cycling_type(ISO8601_CYCLING_TYPE, "Z")
- my_now = "20100808T1540Z"
- sequence = (
- "previous(T2100Z)", # 20100807T2100Z
- "previous(T00)", # 20100808T0000Z
- "previous(T-15)", # 20100808T1515Z
- "previous(T-45)", # 20100808T1445Z
- "previous(-10)", # 20100101T0000Z
- "previous(-1008)", # 20100801T0000Z
- "previous(--10)", # 20091001T0000Z
- "previous(--0325)", # 20100325T0000Z
- "previous(---10)", # 20100710T0000Z
- "previous(---05T1200Z)", # 20100805T1200Z
- )
-
- output = []
-
- for point in sequence:
- output.append(ingest_time(point, my_now))
- assert output == [
- "20100807T2100Z",
- "20100808T0000Z",
- "20100808T1515Z",
- "20100808T1445Z",
- "20100101T0000Z",
- "20100801T0000Z",
- "20091001T0000Z",
- "20100325T0000Z",
- "20100710T0000Z",
- "20100805T1200Z",
- ]
+ my_now = "2010-08-08T15:41Z"
+ assert ingest_time(value, my_now) == expected
def test_sequence(set_cycling_type):
@@ -832,63 +835,40 @@ def test_weeks_days(set_cycling_type):
]
-def test_cug(set_cycling_type):
- """Test the offset CP examples in the Cylc user guide"""
+@pytest.mark.parametrize(
+ 'value, expected', [
+ ('next(T-00)', '20180314T1600Z'),
+ ('previous(T-00)', '20180314T1500Z'),
+ ('next(T-00; T-15; T-30; T-45)', '20180314T1515Z'),
+ ('previous(T-00; T-15; T-30; T-45)', '20180314T1500Z'),
+ ('next(T00)', '20180315T0000Z'),
+ ('previous(T00)', '20180314T0000Z'),
+ ('next(T06:30Z)', '20180315T0630Z'),
+ ('previous(T06:30) -P1D', '20180313T0630Z'),
+ ('next(T00; T06; T12; T18)', '20180314T1800Z'),
+ ('previous(T00; T06; T12; T18)', '20180314T1200Z'),
+ ('next(T00; T06; T12; T18)+P1W', '20180321T1800Z'),
+ ('PT1H', '20180314T1612Z'),
+ ('-P1M', '20180214T1512Z'),
+ ('next(-00)', '21000101T0000Z'),
+ ('previous(--01)', '20180101T0000Z'),
+ ('next(---01)', '20180401T0000Z'),
+ ('previous(--1225)', '20171225T0000Z'),
+ ('next(-2006)', '20200601T0000Z'),
+ ('previous(-W101)', '20180305T0000Z'),
+ ('next(-W-1; -W-3; -W-5)', '20180314T0000Z'),
+ ('next(-001; -091; -181; -271)', '20180401T0000Z'),
+ ('previous(-365T12Z)', '20171231T1200Z'),
+ ]
+)
+def test_user_guide_examples(value: str, expected: str, set_cycling_type):
+ """Test the offset CP examples in the Cylc user guide.
+
+ https://cylc.github.io/cylc-doc/stable/html/user-guide/writing-workflows/scheduling.html
+ """
set_cycling_type(ISO8601_CYCLING_TYPE, "Z")
my_now = "2018-03-14T15:12Z"
- sequence = (
- "next(T-00)", # 20180314T1600Z
- "previous(T-00)", # 20180314T1500Z
- "next(T-00; T-15; T-30; T-45)", # 20180314T1515Z
- "previous(T-00; T-15; T-30; T-45)", # 20180314T1500Z
- "next(T00)", # 20180315T0000Z
- "previous(T00)", # 20180314T0000Z
- "next(T06:30Z)", # 20180315T0630Z
- "previous(T06:30) -P1D", # 20180313T0630Z
- "next(T00; T06; T12; T18)", # 20180314T1800Z
- "previous(T00; T06; T12; T18)", # 20180314T1200Z
- "next(T00; T06; T12; T18)+P1W", # 20180321T1800Z
- "PT1H", # 20180314T1612Z
- "-P1M", # 20180214T1512Z
- "next(-00)", # 21000101T0000Z
- "previous(--01)", # 20180101T0000Z
- "next(---01)", # 20180401T0000Z
- "previous(--1225)", # 20171225T0000Z
- "next(-2006)", # 20200601T0000Z
- "previous(-W101)", # 20180305T0000Z
- "next(-W-1; -W-3; -W-5)", # 20180314T0000Z
- "next(-001; -091; -181; -271)", # 20180401T0000Z
- "previous(-365T12Z)", # 20171231T1200Z
- )
-
- output = []
-
- for point in sequence:
- output.append(ingest_time(point, my_now))
- assert output == [
- "20180314T1600Z",
- "20180314T1500Z",
- "20180314T1515Z",
- "20180314T1500Z",
- "20180315T0000Z",
- "20180314T0000Z",
- "20180315T0630Z",
- "20180313T0630Z",
- "20180314T1800Z",
- "20180314T1200Z",
- "20180321T1800Z",
- "20180314T1612Z",
- "20180214T1512Z",
- "21000101T0000Z",
- "20180101T0000Z",
- "20180401T0000Z",
- "20171225T0000Z",
- "20200601T0000Z",
- "20180305T0000Z",
- "20180314T0000Z",
- "20180401T0000Z",
- "20171231T1200Z",
- ]
+ assert ingest_time(value, my_now) == expected
def test_next_simple_no_now(set_cycling_type):
diff --git a/tests/unit/job_runner_handlers/test_pbs.py b/tests/unit/job_runner_handlers/test_pbs.py
index ac7c884046b..9bccf6a2314 100644
--- a/tests/unit/job_runner_handlers/test_pbs.py
+++ b/tests/unit/job_runner_handlers/test_pbs.py
@@ -20,6 +20,7 @@
JOB_RUNNER_HANDLER,
PBSHandler
)
+from cylc.flow.job_runner_mgr import JobRunnerManager
VERY_LONG_STR = 'x' * 240
@@ -118,3 +119,43 @@
)
def test_format_directives(job_conf: dict, lines: list):
assert JOB_RUNNER_HANDLER.format_directives(job_conf) == lines
+
+
+def test_filter_poll_many_output():
+ """It should strip trailing junk from job IDs.
+
+ Job IDs are assumed to be a series of numbers, optionally followed by a
+ full-stop and some other letters and numbers which are not needed for
+ job tracking purposes.
+
+ Job IDs are not expected to start with letters e.g. `abc.456` is not
+ supported.
+ """
+ assert JOB_RUNNER_HANDLER.filter_poll_many_output('''
+Job id Name User Time Use S Queue
+---------------- ---------------- ---------------- -------- - -----
+12345.foo.bar.baz test-pbs xxxxxxx 0 Q reomq
+23456.foo test-pbs xxxxxxx 0 Q romeq
+34567 test-pbs xxxxxxx 1 Q romeq
+abc.456 test-pbs xxxxxxx 2 Q romeq
+abcdef test-pbs xxxxxxx 2 Q romeq
+ ''') == ['12345', '23456', '34567']
+
+
+
+def test_filter_submit_output(tmp_path):
+ """See notes for test_filter_poll_many_output."""
+ status_file = tmp_path / 'submit_out'
+ status_file.touch()
+
+ def test(out):
+ return JobRunnerManager._filter_submit_output(
+ status_file,
+ JOB_RUNNER_HANDLER,
+ out,
+ '',
+ )[2]
+
+ assert test(' 12345.foo.bar.baz') == '12345'
+ assert test(' 12345.foo') == '12345'
+ assert test(' 12345') == '12345'
diff --git a/tests/unit/parsec/test_fileparse.py b/tests/unit/parsec/test_fileparse.py
index fde67099c0a..569632e131d 100644
--- a/tests/unit/parsec/test_fileparse.py
+++ b/tests/unit/parsec/test_fileparse.py
@@ -14,7 +14,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-import tempfile
+from tempfile import NamedTemporaryFile
+from contextlib import suppress
import os
import pytest
@@ -31,12 +32,15 @@
)
from cylc.flow.parsec.OrderedDict import OrderedDictWithDefaults
from cylc.flow.parsec.fileparse import (
+ EXTRA_VARS_TEMPLATE,
_prepend_old_templatevars,
_get_fpath_for_source,
+ get_cylc_env_vars,
addict,
addsect,
multiline,
parse,
+ process_plugins,
read_and_proc,
merge_template_vars
)
@@ -278,7 +282,7 @@ def test_multiline():
def test_read_and_proc_no_template_engine():
- with tempfile.NamedTemporaryFile() as tf:
+ with NamedTemporaryFile() as tf:
fpath = tf.name
template_vars = None
viewcfg = {
@@ -305,7 +309,7 @@ def test_read_and_proc_no_template_engine():
def test_inline():
- with tempfile.NamedTemporaryFile() as tf:
+ with NamedTemporaryFile() as tf:
fpath = tf.name
template_vars = None
viewcfg = {
@@ -313,7 +317,7 @@ def test_inline():
'contin': False, 'inline': True,
'mark': None, 'single': None, 'label': None
}
- with tempfile.NamedTemporaryFile() as include_file:
+ with NamedTemporaryFile() as include_file:
include_file.write("c=d".encode())
include_file.flush()
tf.write(("a=b\n%include \"{0}\""
@@ -325,7 +329,7 @@ def test_inline():
def test_inline_error():
- with tempfile.NamedTemporaryFile() as tf:
+ with NamedTemporaryFile() as tf:
fpath = tf.name
template_vars = None
viewcfg = {
@@ -342,7 +346,7 @@ def test_inline_error():
def test_read_and_proc_jinja2():
- with tempfile.NamedTemporaryFile() as tf:
+ with NamedTemporaryFile() as tf:
fpath = tf.name
template_vars = {
'name': 'Cylc'
@@ -358,8 +362,44 @@ def test_read_and_proc_jinja2():
assert r == ['a=Cylc']
+def test_read_and_proc_cwd(tmp_path):
+ """The template processor should be able to read workflow files.
+
+ This relies on moving to the config dir during file parsing.
+ """
+
+ sdir = tmp_path / "sub"
+ sdir.mkdir()
+
+ for sub in ["a", "b", "c"]:
+ (sdir / sub).touch()
+
+ viewcfg = {
+ 'empy': False,
+ 'jinja2': True,
+ 'contin': False,
+ 'inline': False
+ }
+
+ tmpf = tmp_path / "a.conf"
+
+ with open(tmpf, 'w') as tf:
+ tf.write(
+ '#!Jinja2'
+ '\n{% from "os" import listdir %}'
+ '\n{% for f in listdir("sub") %}'
+ '\n{{f}}'
+ '\n{% endfor %}'
+ )
+
+ with open(tmpf, 'r') as tf:
+ r = read_and_proc(fpath=tf.name, viewcfg=viewcfg)
+
+ assert sorted(r) == ['a', 'b', 'c']
+
+
def test_read_and_proc_jinja2_error():
- with tempfile.NamedTemporaryFile() as tf:
+ with NamedTemporaryFile() as tf:
fpath = tf.name
template_vars = {
'name': 'Cylc'
@@ -380,7 +420,7 @@ def test_read_and_proc_jinja2_error():
def test_read_and_proc_jinja2_error_missing_shebang():
- with tempfile.NamedTemporaryFile() as tf:
+ with NamedTemporaryFile() as tf:
fpath = tf.name
template_vars = {
'name': 'Cylc'
@@ -400,112 +440,107 @@ def test_read_and_proc_jinja2_error_missing_shebang():
# --- originally we had a test for empy here, moved to test_empysupport
def test_parse_keys_only_singleline():
- with tempfile.NamedTemporaryFile() as of:
- with tempfile.NamedTemporaryFile() as tf:
- fpath = tf.name
- template_vars = {
- 'name': 'Cylc'
- }
- tf.write("#!jinja2\na={{ name }}\n".encode())
- tf.flush()
- r = parse(fpath=fpath, output_fname=of.name,
- template_vars=template_vars)
- expected = OrderedDictWithDefaults()
- expected['a'] = 'Cylc'
- assert r == expected
- of.flush()
- output_file_contents = of.read().decode()
- assert output_file_contents == 'a=Cylc\n'
+ with NamedTemporaryFile() as of, NamedTemporaryFile() as tf:
+ fpath = tf.name
+ template_vars = {
+ 'name': 'Cylc'
+ }
+ tf.write("#!jinja2\na={{ name }}\n".encode())
+ tf.flush()
+ r = parse(fpath=fpath, output_fname=of.name,
+ template_vars=template_vars)
+ expected = OrderedDictWithDefaults()
+ expected['a'] = 'Cylc'
+ assert r == expected
+ of.flush()
+ output_file_contents = of.read().decode()
+ assert output_file_contents == 'a=Cylc\n'
def test_parse_keys_only_multiline():
- with tempfile.NamedTemporaryFile() as of:
- with tempfile.NamedTemporaryFile() as tf:
- fpath = tf.name
- template_vars = {
- 'name': 'Cylc'
- }
- tf.write(
- "#!jinja2\na='''value is \\\n{{ name }}'''\n".encode())
- tf.flush()
- r = parse(fpath=fpath, output_fname=of.name,
- template_vars=template_vars)
- expected = OrderedDictWithDefaults()
- expected['a'] = "'''value is Cylc'''"
- assert r == expected
+ with NamedTemporaryFile() as of, NamedTemporaryFile() as tf:
+ fpath = tf.name
+ template_vars = {
+ 'name': 'Cylc'
+ }
+ tf.write(
+ "#!jinja2\na='''value is \\\n{{ name }}'''\n".encode())
+ tf.flush()
+ r = parse(fpath=fpath, output_fname=of.name,
+ template_vars=template_vars)
+ expected = OrderedDictWithDefaults()
+ expected['a'] = "'''value is Cylc'''"
+ assert r == expected
def test_parse_invalid_line():
- with tempfile.NamedTemporaryFile() as of:
- with tempfile.NamedTemporaryFile() as tf:
- fpath = tf.name
- template_vars = {
- 'name': 'Cylc'
- }
- tf.write("#!jinja2\n{{ name }}\n".encode())
- tf.flush()
- with pytest.raises(FileParseError) as cm:
- parse(fpath=fpath, output_fname=of.name,
- template_vars=template_vars)
- exc = cm.value
- assert exc.reason == 'Invalid line'
- assert exc.line_num == 1
- assert exc.line == 'Cylc'
+ with NamedTemporaryFile() as of, NamedTemporaryFile() as tf:
+ fpath = tf.name
+ template_vars = {
+ 'name': 'Cylc'
+ }
+ tf.write("#!jinja2\n{{ name }}\n".encode())
+ tf.flush()
+ with pytest.raises(FileParseError) as cm:
+ parse(fpath=fpath, output_fname=of.name,
+ template_vars=template_vars)
+ exc = cm.value
+ assert exc.reason == 'Invalid line'
+ assert exc.line_num == 1
+ assert exc.line == 'Cylc'
def test_parse_comments():
- with tempfile.NamedTemporaryFile() as of:
- with tempfile.NamedTemporaryFile() as tf:
- fpath = tf.name
- template_vars = {
- 'name': 'Cylc'
- }
- tf.write("#!jinja2\na={{ name }}\n# comment!".encode())
- tf.flush()
- r = parse(fpath=fpath, output_fname=of.name,
- template_vars=template_vars)
- expected = OrderedDictWithDefaults()
- expected['a'] = 'Cylc'
- assert r == expected
- of.flush()
- output_file_contents = of.read().decode()
- assert output_file_contents == 'a=Cylc\n# comment!\n'
+ with NamedTemporaryFile() as of, NamedTemporaryFile() as tf:
+ fpath = tf.name
+ template_vars = {
+ 'name': 'Cylc'
+ }
+ tf.write("#!jinja2\na={{ name }}\n# comment!".encode())
+ tf.flush()
+ r = parse(fpath=fpath, output_fname=of.name,
+ template_vars=template_vars)
+ expected = OrderedDictWithDefaults()
+ expected['a'] = 'Cylc'
+ assert r == expected
+ of.flush()
+ output_file_contents = of.read().decode()
+ assert output_file_contents == 'a=Cylc\n# comment!\n'
def test_parse_with_sections():
- with tempfile.NamedTemporaryFile() as of:
- with tempfile.NamedTemporaryFile() as tf:
- fpath = tf.name
- template_vars = {
- 'name': 'Cylc'
- }
- tf.write(("#!jinja2\n[section1]\n"
- "a={{ name }}\n# comment!\n"
- "[[subsection1]]\n"
- "[[subsection2]]\n"
- "[section2]").encode())
- tf.flush()
- r = parse(fpath=fpath, output_fname=of.name,
- template_vars=template_vars)
- expected = OrderedDictWithDefaults()
- expected['section1'] = OrderedDictWithDefaults()
- expected['section1']['a'] = 'Cylc'
- expected['section1']['subsection1'] = OrderedDictWithDefaults()
- expected['section1']['subsection2'] = OrderedDictWithDefaults()
- expected['section2'] = OrderedDictWithDefaults()
- assert r == expected
- of.flush()
- output_file_contents = of.read().decode()
- assert output_file_contents == (
- '[section1]\na=Cylc\n# comment!\n'
- '[[subsection1]]\n'
- '[[subsection2]]\n'
- '[section2]\n'
- )
+ with NamedTemporaryFile() as of, NamedTemporaryFile() as tf:
+ fpath = tf.name
+ template_vars = {
+ 'name': 'Cylc'
+ }
+ tf.write(("#!jinja2\n[section1]\n"
+ "a={{ name }}\n# comment!\n"
+ "[[subsection1]]\n"
+ "[[subsection2]]\n"
+ "[section2]").encode())
+ tf.flush()
+ r = parse(fpath=fpath, output_fname=of.name,
+ template_vars=template_vars)
+ expected = OrderedDictWithDefaults()
+ expected['section1'] = OrderedDictWithDefaults()
+ expected['section1']['a'] = 'Cylc'
+ expected['section1']['subsection1'] = OrderedDictWithDefaults()
+ expected['section1']['subsection2'] = OrderedDictWithDefaults()
+ expected['section2'] = OrderedDictWithDefaults()
+ assert r == expected
+ of.flush()
+ output_file_contents = of.read().decode()
+ assert output_file_contents == (
+ '[section1]\na=Cylc\n# comment!\n'
+ '[[subsection1]]\n'
+ '[[subsection2]]\n'
+ '[section2]\n'
+ )
def test_parse_with_sections_missing_bracket():
- with tempfile.NamedTemporaryFile() as tf:
+ with NamedTemporaryFile() as tf:
fpath = tf.name
template_vars = {
'name': 'Cylc'
@@ -522,27 +557,26 @@ def test_parse_with_sections_missing_bracket():
def test_parse_with_sections_error_wrong_level():
- with tempfile.NamedTemporaryFile() as of:
- with tempfile.NamedTemporaryFile() as tf:
- fpath = tf.name
- template_vars = {
- 'name': 'Cylc'
- }
- tf.write(("#!jinja2\n[section1]\n"
- "a={{ name }}\n# comment!\n"
- "[[[subsection1]]]\n") # expected [[]] instead!
- .encode())
- tf.flush()
- with pytest.raises(FileParseError) as cm:
- parse(fpath=fpath, output_fname=of.name,
- template_vars=template_vars)
- exc = cm.value
- assert exc.line_num == 4
- assert exc.line == '[[[subsection1]]]'
+ with NamedTemporaryFile() as of, NamedTemporaryFile() as tf:
+ fpath = tf.name
+ template_vars = {
+ 'name': 'Cylc'
+ }
+ tf.write(("#!jinja2\n[section1]\n"
+ "a={{ name }}\n# comment!\n"
+ "[[[subsection1]]]\n") # expected [[]] instead!
+ .encode())
+ tf.flush()
+ with pytest.raises(FileParseError) as cm:
+ parse(fpath=fpath, output_fname=of.name,
+ template_vars=template_vars)
+ exc = cm.value
+ assert exc.line_num == 4
+ assert exc.line == '[[[subsection1]]]'
def test_unclosed_multiline():
- with tempfile.NamedTemporaryFile() as tf:
+ with NamedTemporaryFile() as tf:
fpath = tf.name
template_vars = {
'name': 'Cylc'
@@ -645,11 +679,9 @@ def _inner(create_srclink=True):
src.mkdir(exist_ok=True)
link = tmp_path.parent / '_cylc-install/source'
link.parent.mkdir(exist_ok=True)
- try:
- os.symlink(src, link)
- except FileExistsError:
+ with suppress(FileExistsError):
# We don't mind the link persisting.
- pass
+ os.symlink(src, link)
return tmp_path / 'flow.cylc'
yield _inner
@@ -707,3 +739,70 @@ def test_get_fpath_for_source(tmp_path):
opts.against_source = True
assert _get_fpath_for_source(
rundir / 'flow.cylc', opts) == str(srcdir / 'flow.cylc')
+
+
+def test_user_has_no_cwd(tmp_path):
+ """Test we can parse a config file even if cwd does not exist."""
+ cwd = tmp_path / "cwd"
+ os.mkdir(cwd)
+ os.chdir(cwd)
+ os.rmdir(cwd)
+ # (I am now located in a non-existent directory. Outrageous!)
+ with NamedTemporaryFile() as tf:
+ fpath = tf.name
+ tf.write(('''
+ [scheduling]
+ [[graph]]
+ R1 = "foo"
+ ''').encode())
+ tf.flush()
+ # Should not raise FileNotFoundError from os.getcwd():
+ parse(fpath=fpath, output_fname="")
+
+
+def test_get_cylc_env_vars(monkeypatch):
+ """It should return CYLC env vars but not CYLC_VERSION or CYLC_ENV_NAME."""
+ monkeypatch.setattr(
+ 'os.environ',
+ {
+ "CYLC_VERSION": "betwixt",
+ "CYLC_ENV_NAME": "between",
+ "CYLC_QUESTION": "que?",
+ "CYLC_ANSWER": "42",
+ "FOO": "foo"
+ }
+ )
+ assert (
+ get_cylc_env_vars() == {
+ "CYLC_QUESTION": "que?",
+ "CYLC_ANSWER": "42",
+ }
+ )
+
+
+class EntryPointWrapper:
+ """Wraps a method to make it look like an entry point."""
+
+ def __init__(self, fcn):
+ self.name = fcn.__name__
+ self.fcn = fcn
+
+ def load(self):
+ return self.fcn
+
+
+@EntryPointWrapper
+def pre_configure_basic(*_, **__):
+ """Simple plugin that returns one env var and one template var."""
+ return {'env': {'foo': 44}, 'template_variables': {}}
+
+
+def test_plugins_not_called_on_global_config(monkeypatch):
+ monkeypatch.setattr(
+ 'cylc.flow.parsec.fileparse.iter_entry_points',
+ lambda x: [pre_configure_basic]
+ )
+ result = process_plugins('/pennine/way/flow.cylc', {})
+ assert result != EXTRA_VARS_TEMPLATE
+ result = process_plugins('/appalachian/trail/global.cylc', {})
+ assert result == EXTRA_VARS_TEMPLATE
diff --git a/tests/unit/parsec/test_upgrade.py b/tests/unit/parsec/test_upgrade.py
index a95420a1161..d132c1248a2 100644
--- a/tests/unit/parsec/test_upgrade.py
+++ b/tests/unit/parsec/test_upgrade.py
@@ -275,3 +275,17 @@ def test_expand_obsolete(self):
expanded = self.u.expand(upg)
self.assertEqual(1, len(expanded))
self.assertTrue(expanded[0]['new'] is None)
+
+
+def test_template_in_converter_description(caplog, capsys):
+ """Before and after values are available to the conversion descriptor"""
+ cfg = {'old': 42}
+ u = upgrader(cfg, 'Whateva')
+ u.deprecate(
+ '2.0.0', ['old'], ['new'],
+ cvtr=converter(lambda x: x + 20, '{old} -> {new}'),
+ silent=False,
+ )
+ u.upgrade()
+ assert cfg == {'new': 62}
+ assert '42 -> 62' in caplog.records[1].message
diff --git a/tests/unit/plugins/test_pre_configure.py b/tests/unit/plugins/test_pre_configure.py
index 717648ea594..5f54571e741 100644
--- a/tests/unit/plugins/test_pre_configure.py
+++ b/tests/unit/plugins/test_pre_configure.py
@@ -31,7 +31,7 @@ def __init__(self, fcn):
self.name = fcn.__name__
self.fcn = fcn
- def resolve(self):
+ def load(self):
return self.fcn
@@ -68,7 +68,7 @@ def test_pre_configure(monkeypatch):
'cylc.flow.parsec.fileparse.iter_entry_points',
lambda x: [pre_configure_basic]
)
- extra_vars = process_plugins(None, None)
+ extra_vars = process_plugins('/', None)
assert extra_vars == {
'env': {
'ANSWER': '42'
@@ -90,7 +90,7 @@ def test_pre_configure_duplicate(monkeypatch):
]
)
with pytest.raises(ParsecError):
- process_plugins(None, None)
+ process_plugins('/', None)
def test_pre_configure_templating_detected(monkeypatch):
@@ -103,7 +103,7 @@ def test_pre_configure_templating_detected(monkeypatch):
]
)
with pytest.raises(ParsecError):
- process_plugins(None, None)
+ process_plugins('/', None)
def test_pre_configure_exception(monkeypatch):
@@ -113,7 +113,7 @@ def test_pre_configure_exception(monkeypatch):
lambda x: [pre_configure_error]
)
with pytest.raises(PluginError) as exc_ctx:
- process_plugins(None, None)
+ process_plugins('/', None)
# the context of the original error should be preserved in the raised
# exception
assert exc_ctx.value.entry_point == 'cylc.pre_configure'
diff --git a/tests/unit/scripts/test_clean.py b/tests/unit/scripts/test_clean.py
index dc29d7ba1b6..cc1bf2eff2f 100644
--- a/tests/unit/scripts/test_clean.py
+++ b/tests/unit/scripts/test_clean.py
@@ -16,11 +16,14 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-from typing import Callable, List
+from typing import Callable, List, Type, Union
import pytest
-from cylc.flow.scripts.clean import CleanOptions, scan, run
+from cylc.flow.exceptions import InputError
+from cylc.flow.scripts.clean import (
+ CleanOptions, _main, parse_timeout, scan, run
+)
async def test_scan(tmp_run_dir):
@@ -88,3 +91,40 @@ async def test_multi(tmp_run_dir: Callable, mute: List[str]):
mute.clear()
await run('*', opts=opts)
assert mute == ['bar/pub/beer', 'baz/run1', 'foo']
+
+
+@pytest.mark.parametrize(
+ 'timeout, expected',
+ [('100', '100'),
+ ('PT1M2S', '62'),
+ ('', ''),
+ ('oopsie', InputError),
+ (' ', InputError)]
+)
+def test_parse_timeout(
+ timeout: str,
+ expected: Union[str, Type[InputError]]
+):
+ """It should accept ISO 8601 format or number of seconds."""
+ opts = CleanOptions(remote_timeout=timeout)
+
+ if expected is InputError:
+ with pytest.raises(expected):
+ parse_timeout(opts)
+ else:
+ parse_timeout(opts)
+ assert opts.remote_timeout == expected
+
+
+@pytest.mark.parametrize(
+ 'opts, expected_msg',
+ [
+ ({'local_only': True, 'remote_only': True}, "mutually exclusive"),
+ ({'remote_timeout': 'oops'}, "Invalid timeout"),
+ ]
+)
+def test_bad_user_input(opts: dict, expected_msg: str, mute):
+ """It should raise an InputError for bad user input."""
+ with pytest.raises(InputError) as exc_info:
+ _main(CleanOptions(**opts), 'blah')
+ assert expected_msg in str(exc_info.value)
diff --git a/tests/unit/scripts/test_completion_server.py b/tests/unit/scripts/test_completion_server.py
index b8a0c642c70..186e13b7272 100644
--- a/tests/unit/scripts/test_completion_server.py
+++ b/tests/unit/scripts/test_completion_server.py
@@ -246,6 +246,12 @@ async def test_complete_cylc(dummy_workflow):
'cylc', '62656566', '--77656C6C696E67746F6E='
) == set()
+ # $ cylc cat-log f
+ assert await _complete_cylc('cylc', 'cat-log', 'f') == {'foo/run2//'}
+
+ # $ cylc log f # NOTE: "log" is an alias for "cat-log"
+ assert await _complete_cylc('cylc', 'log', 'f') == {'foo/run2//'}
+
# $ cylc help
assert 'all' in await _complete_cylc('cylc', 'help', '')
@@ -392,18 +398,17 @@ def test_list_options(monkeypatch):
assert list_options('zz9+za') == []
# patch the logic to turn off the auto_add behaviour of CylcOptionParser
- def _resolve():
- def _parser_function():
- parser = get_option_parser()
- del parser.auto_add
- return parser
-
- return SimpleNamespace(parser_function=_parser_function)
-
- monkeypatch.setattr(
- COMMANDS['trigger'],
- 'resolve',
- _resolve
+ class EntryPoint:
+ def load(self):
+ def _parser_function():
+ parser = get_option_parser()
+ del parser.auto_add
+ return parser
+ return SimpleNamespace(parser_function=_parser_function)
+ monkeypatch.setitem(
+ COMMANDS,
+ 'trigger',
+ EntryPoint(),
)
# with auto_add turned off the --color option should be absent
@@ -668,7 +673,7 @@ def _get_current_completion_script_version(_script, lang):
# set the completion script compatibility range to >=1.0.0, <2.0.0
monkeypatch.setattr(
'cylc.flow.scripts.completion_server.REQUIRED_SCRIPT_VERSION',
- 'completion-script >=1.0.0, <2.0.0',
+ '>=1.0.0, <2.0.0',
)
monkeypatch.setattr(
'cylc.flow.scripts.completion_server'
diff --git a/tests/unit/scripts/test_cylc.py b/tests/unit/scripts/test_cylc.py
index f1483e9ee4a..9928024bb66 100644
--- a/tests/unit/scripts/test_cylc.py
+++ b/tests/unit/scripts/test_cylc.py
@@ -15,14 +15,15 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-import pkg_resources
+import os
+import sys
from types import SimpleNamespace
from typing import Callable
from unittest.mock import Mock
import pytest
-from cylc.flow.scripts.cylc import iter_commands
+from cylc.flow.scripts.cylc import iter_commands, pythonpath_manip
from ..conftest import MonkeyMock
@@ -30,12 +31,9 @@
@pytest.fixture
def mock_entry_points(monkeypatch: pytest.MonkeyPatch):
"""Mock a range of entry points."""
- def _resolve_fail(*args, **kwargs):
+ def _load_fail(*args, **kwargs):
raise ModuleNotFoundError('foo')
- def _require_fail(*args, **kwargs):
- raise pkg_resources.DistributionNotFound('foo', ['my_extras'])
-
def _resolve_ok(*args, **kwargs):
return Mock()
@@ -47,24 +45,18 @@ def _mocked_entry_points(include_bad: bool = False):
# an entry point with all dependencies installed:
'good': SimpleNamespace(
name='good',
- module_name='os.path',
- resolve=_resolve_ok,
- require=_require_ok,
+ module='os.path',
+ load=_resolve_ok,
+ extras=[],
+ dist=SimpleNamespace(name='a'),
),
# an entry point with optional dependencies missing:
'missing': SimpleNamespace(
name='missing',
- module_name='not.a.python.module', # force an import error
- resolve=_resolve_fail,
- require=_require_fail,
- ),
- # an entry point with optional dependencies missing, but they
- # are not needed for the core functionality of the entry point:
- 'partial': SimpleNamespace(
- name='partial',
- module_name='os.path',
- resolve=_resolve_ok,
- require=_require_fail,
+ module='not.a.python.module', # force an import error
+ load=_load_fail,
+ extras=[],
+ dist=SimpleNamespace(name='foo'),
),
}
if include_bad:
@@ -72,9 +64,11 @@ def _mocked_entry_points(include_bad: bool = False):
# missing:
commands['bad'] = SimpleNamespace(
name='bad',
- module_name='not.a.python.module',
- resolve=_resolve_fail,
+ module='not.a.python.module',
+ load=_load_fail,
require=_require_ok,
+ extras=[],
+ dist=SimpleNamespace(name='d'),
)
monkeypatch.setattr('cylc.flow.scripts.cylc.COMMANDS', commands)
@@ -88,14 +82,13 @@ def test_iter_commands(mock_entry_points):
"""
mock_entry_points()
commands = list(iter_commands())
- assert [i[0] for i in commands] == ['good', 'partial']
+ assert [i[0] for i in commands] == ['good']
def test_iter_commands_bad(mock_entry_points):
- """Test listing commands fails if there is an unexpected import error."""
+ """Test listing commands doesn't fail on import error."""
mock_entry_points(include_bad=True)
- with pytest.raises(ModuleNotFoundError):
- list(iter_commands())
+ list(iter_commands())
def test_execute_cmd(
@@ -123,16 +116,33 @@ def test_execute_cmd(
execute_cmd('missing')
capexit.assert_any_call(1)
assert capsys.readouterr().err.strip() == (
- "cylc missing: The 'foo' distribution was not found and is"
- " required by my_extras"
+ '"cylc missing" requires "foo"\n\nModuleNotFoundError: foo'
)
- # the "partial" entry point should exit 0
- capexit.reset_mock()
- execute_cmd('partial')
- capexit.assert_called_once_with()
- assert capsys.readouterr().err == ''
+ # the "bad" entry point should log an error
+ execute_cmd('bad')
+ capexit.assert_any_call(1)
+
+ stderr = capsys.readouterr().err.strip()
+ assert '"cylc bad" requires "d"' in stderr
+ assert 'ModuleNotFoundError: foo' in stderr
+
- # the "bad" entry point should raise an exception
- with pytest.raises(ModuleNotFoundError):
- execute_cmd('bad')
+def test_pythonpath_manip(monkeypatch):
+ """pythonpath_manip removes items in PYTHONPATH from sys.path
+
+ and adds items from CYLC_PYTHONPATH
+ """
+ monkeypatch.setenv('PYTHONPATH', '/remove1:/remove2')
+ monkeypatch.setattr('sys.path', ['/leave-alone', '/remove1', '/remove2'])
+ pythonpath_manip()
+ # ... we don't change PYTHONPATH
+ assert os.environ['PYTHONPATH'] == '/remove1:/remove2'
+ # ... but we do remove PYTHONPATH items from sys.path, and don't remove
+ # items there not in PYTHONPATH
+ assert sys.path == ['/leave-alone']
+ # If CYLC_PYTHONPATH is set we retrieve its contents and
+ # add them to the sys.path:
+ monkeypatch.setenv('CYLC_PYTHONPATH', '/add1:/add2')
+ pythonpath_manip()
+ assert sys.path == ['/add1', '/add2', '/leave-alone']
diff --git a/tests/unit/scripts/test_lint.py b/tests/unit/scripts/test_lint.py
index 205d09019fa..6ee1018cf96 100644
--- a/tests/unit/scripts/test_lint.py
+++ b/tests/unit/scripts/test_lint.py
@@ -28,11 +28,10 @@
MANUAL_DEPRECATIONS,
get_cylc_files,
get_pyproject_toml,
- get_reference_rst,
- get_reference_text,
+ get_reference,
get_upgrader_info,
lint,
- merge_cli_with_tomldata,
+ _merge_cli_with_tomldata,
parse_checks,
validate_toml_items
)
@@ -103,6 +102,7 @@
pre-script = "echo ${CYLC_SUITE_DEF_PATH}"
script = {{HELLOWORLD}}
post-script = "echo ${CYLC_SUITE_INITIAL_CYCLE_TIME}"
+ env-script = POINT=$(rose date 2059 --offset P1M)
[[[suite state polling]]]
template = and
[[[remote]]]
@@ -142,7 +142,6 @@
[[and_another_thing]]
[[[remote]]]
host = `rose host-select thingy`
-
"""
@@ -159,6 +158,9 @@
# {{quix}}
[runtime]
+ [[this_is_ok]]
+ script = echo "this is incorrectly indented"
+
[[foo]]
inherit = hello
[[[job]]]
@@ -332,6 +334,12 @@ def test_check_cylc_file_jinja2_comments():
assert not any('S011' in msg for msg in lint.messages)
+def test_check_cylc_file_jinja2_comments_shell_arithmetic_not_warned():
+ """Jinja2 after a $((10#$variable)) should not warn"""
+ lint = lint_text('#!jinja2\na = b$((10#$foo+5)) {{ BAR }}', ['style'])
+ assert not any('S011' in msg for msg in lint.messages)
+
+
@pytest.mark.parametrize(
# 11 won't be tested because there is no jinja2 shebang
'number', set(range(1, len(MANUAL_DEPRECATIONS) + 1)) - {11}
@@ -359,35 +367,47 @@ def test_get_cylc_files_get_all_rcs(tmp_path):
assert sorted(result) == sorted(expect)
-MOCK_CHECKS = {
- 'U042': {
- 'short': 'section `[vizualization]` has been removed.',
- 'url': 'some url or other',
- 'purpose': 'U',
- 'rst': 'section ``[vizualization]`` has been removed.',
- 'function': re.compile('not a regex')
- },
-}
+def mock_parse_checks(*args, **kwargs):
+ return {
+ 'U042': {
+ 'short': 'section `[vizualization]` has been removed.',
+ 'url': 'some url or other',
+ 'purpose': 'U',
+ 'rst': 'section ``[vizualization]`` has been removed.',
+ 'function': re.compile('not a regex')
+ },
+ }
-def test_get_reference_rst():
+def test_get_reference_rst(monkeypatch):
"""It produces a reference file for our linting."""
- ref = get_reference_rst(MOCK_CHECKS)
+ monkeypatch.setattr(
+ 'cylc.flow.scripts.lint.parse_checks', mock_parse_checks
+ )
+ ref = get_reference('all', 'rst')
expect = (
'\n7 to 8 upgrades\n---------------\n\n'
- 'U042\n^^^^\nsection ``[vizualization]`` has been '
+ '`U042 `_'
+ f'\n{ "^" * 78 }'
+ '\nsection ``[vizualization]`` has been '
'removed.\n\n\n'
)
assert ref == expect
-def test_get_reference_text():
+def test_get_reference_text(monkeypatch):
"""It produces a reference file for our linting."""
- ref = get_reference_text(MOCK_CHECKS)
+ monkeypatch.setattr(
+ 'cylc.flow.scripts.lint.parse_checks', mock_parse_checks
+ )
+ ref = get_reference('all', 'text')
expect = (
'\n7 to 8 upgrades\n---------------\n\n'
'U042:\n section `[vizualization]` has been '
- 'removed.\n\n\n'
+ 'removed.'
+ '\n https://cylc.github.io/cylc-doc/stable/html/7-to-8/some'
+ ' url or other\n\n\n'
)
assert ref == expect
@@ -556,7 +576,7 @@ def test_validate_toml_items(input_, error):
)
def test_merge_cli_with_tomldata(clidata, tomldata, expect):
"""It merges each of the three sections correctly: see function.__doc__"""
- assert merge_cli_with_tomldata(clidata, tomldata) == expect
+ assert _merge_cli_with_tomldata(clidata, tomldata) == expect
def test_invalid_tomlfile(tmp_path):
@@ -572,11 +592,45 @@ def test_invalid_tomlfile(tmp_path):
'ref, expect',
[
[True, 'line > ```` characters'],
- [False, 'line > 130 characters']
+ [False, 'line > 42 characters']
]
)
def test_parse_checks_reference_mode(ref, expect):
- result = parse_checks(['style'], reference=ref)
- key = list(result.keys())[-1]
- value = result[key]
+    """Add extra explanation of max line length setting in reference mode.
+ """
+ result = parse_checks(['style'], reference=ref, max_line_len=42)
+ value = result['S012']
assert expect in value['short']
+
+
+@pytest.mark.parametrize(
+ 'spaces, expect',
+ (
+ (0, 'S002'),
+ (1, 'S013'),
+ (2, 'S013'),
+ (3, 'S013'),
+ (4, None),
+ (5, 'S013'),
+ (6, 'S013'),
+ (7, 'S013'),
+ (8, None),
+ (9, 'S013')
+ )
+)
+def test_indents(spaces, expect):
+ """Test different wrong indentations
+
+ Parameterization deliberately over-obvious to avoid replicating
+ arithmetic logic from code. Dangerously close to re-testing ``%``
+ builtin.
+ """
+ result = lint_text(
+ f"{' ' * spaces}foo = 42",
+ ['style']
+ )
+ result = ''.join(result.messages)
+ if expect:
+ assert expect in result
+ else:
+ assert not result
diff --git a/tests/unit/test_async_util.py b/tests/unit/test_async_util.py
index 17823817dbe..56373e7185d 100644
--- a/tests/unit/test_async_util.py
+++ b/tests/unit/test_async_util.py
@@ -15,6 +15,7 @@
# along with this program. If not, see .
import asyncio
+from inspect import signature
import logging
from pathlib import Path
from random import random
@@ -209,14 +210,17 @@ def test_pipe_brackets():
@pipe
-async def documented(x):
+async def documented(x: str, y: int = 0):
"""The docstring for the pipe function."""
pass
def test_documentation():
- """It should preserve the docstring of pipe functions."""
+ """It should preserve the docstring, signature & annotations of
+ the wrapped function."""
assert documented.__doc__ == 'The docstring for the pipe function.'
+ assert documented.__annotations__ == {'x': str, 'y': int}
+ assert str(signature(documented)) == '(x: str, y: int = 0)'
def test_rewind():
diff --git a/tests/unit/test_clean.py b/tests/unit/test_clean.py
index f335b2d110d..a0c0ccdda1e 100644
--- a/tests/unit/test_clean.py
+++ b/tests/unit/test_clean.py
@@ -19,6 +19,7 @@
import shutil
from glob import iglob
from pathlib import Path
+from subprocess import Popen
from typing import (
Any,
Callable,
@@ -86,7 +87,7 @@ def glbl_cfg_max_scan_depth(mock_glbl_cfg: Callable) -> None:
@pytest.mark.parametrize(
- 'reg, stopped, err, err_msg',
+ 'id_, stopped, err, err_msg',
[
('foo/..', True, WorkflowFilesError,
"cannot be a path that points to the cylc-run directory or above"),
@@ -96,7 +97,7 @@ def glbl_cfg_max_scan_depth(mock_glbl_cfg: Callable) -> None:
]
)
def test_clean_check__fail(
- reg: str,
+ id_: str,
stopped: bool,
err: Type[Exception],
err_msg: str,
@@ -106,7 +107,7 @@ def test_clean_check__fail(
"""Test that _clean_check() fails appropriately.
Params:
- reg: Workflow name.
+ id_: Workflow name.
stopped: Whether the workflow is stopped when _clean_check() is called.
err: Expected error class.
err_msg: Message that is expected to be in the exception.
@@ -121,7 +122,7 @@ def mocked_detect_old_contact_file(*a, **k):
)
with pytest.raises(err) as exc:
- cylc_clean._clean_check(CleanOptions(), reg, tmp_path)
+ cylc_clean._clean_check(CleanOptions(), id_, tmp_path)
assert err_msg in str(exc.value)
@@ -162,15 +163,15 @@ def test_init_clean(
clean_called: If a local clean is expected to go ahead.
remote_clean_called: If a remote clean is expected to go ahead.
"""
- reg = 'foo/bar/'
- rdir = tmp_run_dir(reg, installed=True)
+ id_ = 'foo/bar/'
+ rdir = tmp_run_dir(id_, installed=True)
Path(rdir, WorkflowFiles.Service.DIRNAME, WorkflowFiles.Service.DB).touch()
mock_clean = monkeymock('cylc.flow.clean.clean')
mock_remote_clean = monkeymock('cylc.flow.clean.remote_clean')
monkeypatch.setattr('cylc.flow.clean.get_platforms_from_db',
lambda x: set(db_platforms))
- init_clean(reg, opts=CleanOptions(**opts))
+ init_clean(id_, opts=CleanOptions(**opts))
assert mock_clean.called is clean_called
assert mock_remote_clean.called is remote_clean_called
@@ -261,8 +262,8 @@ def test_init_clean__rm_dirs(
expected_remote_clean: The dirs that are expected to be passed to
remote_clean().
"""
- reg = 'dagobah'
- run_dir: Path = tmp_run_dir(reg)
+ id_ = 'dagobah'
+ run_dir: Path = tmp_run_dir(id_)
Path(run_dir, WorkflowFiles.Service.DIRNAME, WorkflowFiles.Service.DB).touch()
mock_clean = monkeymock('cylc.flow.clean.clean')
mock_remote_clean = monkeymock('cylc.flow.clean.remote_clean')
@@ -271,14 +272,15 @@ def test_init_clean__rm_dirs(
lambda x: platforms)
opts = CleanOptions(rm_dirs=rm_dirs) if rm_dirs else CleanOptions()
- init_clean(reg, opts=opts)
- mock_clean.assert_called_with(reg, run_dir, expected_clean)
+ init_clean(id_, opts=opts)
+ mock_clean.assert_called_with(id_, run_dir, expected_clean)
mock_remote_clean.assert_called_with(
- reg, platforms, expected_remote_clean, opts.remote_timeout)
+ id_, platforms, opts.remote_timeout, expected_remote_clean
+ )
@pytest.mark.parametrize(
- 'reg, symlink_dirs, rm_dirs, expected_deleted, expected_remaining',
+ 'id_, symlink_dirs, rm_dirs, expected_deleted, expected_remaining',
[
pytest.param(
'foo/bar',
@@ -357,7 +359,7 @@ def test_init_clean__rm_dirs(
]
)
def test_clean(
- reg: str,
+ id_: str,
symlink_dirs: Dict[str, str],
rm_dirs: Optional[Set[str]],
expected_deleted: List[str],
@@ -367,7 +369,7 @@ def test_clean(
"""Test the clean() function.
Params:
- reg: Workflow name.
+ id_: Workflow name.
symlink_dirs: As you would find in the global config
under [symlink dirs][platform].
rm_dirs: As passed to clean().
@@ -377,16 +379,16 @@ def test_clean(
not expected to be cleaned.
"""
# --- Setup ---
- run_dir: Path = tmp_run_dir(reg)
+ run_dir: Path = tmp_run_dir(id_)
if 'run' in symlink_dirs:
- target = tmp_path / symlink_dirs['run'] / 'cylc-run' / reg
+ target = tmp_path / symlink_dirs['run'] / 'cylc-run' / id_
target.mkdir(parents=True)
shutil.rmtree(run_dir)
run_dir.symlink_to(target)
symlink_dirs.pop('run')
for symlink_name, target_name in symlink_dirs.items():
- target = tmp_path / target_name / 'cylc-run' / reg / symlink_name
+ target = tmp_path / target_name / 'cylc-run' / id_ / symlink_name
target.mkdir(parents=True)
symlink = run_dir / symlink_name
symlink.symlink_to(target)
@@ -398,7 +400,7 @@ def test_clean(
assert (tmp_path / rel_path).exists()
# --- The actual test ---
- cylc_clean.clean(reg, run_dir, rm_dirs)
+ cylc_clean.clean(id_, run_dir, rm_dirs)
for rel_path in expected_deleted:
assert (tmp_path / rel_path).exists() is False
assert (tmp_path / rel_path).is_symlink() is False
@@ -411,16 +413,16 @@ def test_clean__broken_symlink_run_dir(
) -> None:
"""Test clean() successfully remove a run dir that is a broken symlink."""
# Setup
- reg = 'foo/bar'
- run_dir: Path = tmp_run_dir(reg)
- target = tmp_path.joinpath('rabbow/cylc-run', reg)
+ id_ = 'foo/bar'
+ run_dir: Path = tmp_run_dir(id_)
+ target = tmp_path.joinpath('rabbow/cylc-run', id_)
target.mkdir(parents=True)
shutil.rmtree(run_dir)
run_dir.symlink_to(target)
target.rmdir()
assert run_dir.parent.exists() is True # cylc-run/foo should exist
# Test
- cylc_clean.clean(reg, run_dir)
+ cylc_clean.clean(id_, run_dir)
assert run_dir.parent.exists() is False # cylc-run/foo should be gone
assert target.parent.exists() is False # rabbow/cylc-run/foo too
@@ -430,16 +432,16 @@ def test_clean__bad_symlink_dir_wrong_type(
) -> None:
"""Test clean() raises error when a symlink dir actually points to a file
instead of a dir"""
- reg = 'foo'
- run_dir: Path = tmp_run_dir(reg)
+ id_ = 'foo'
+ run_dir: Path = tmp_run_dir(id_)
symlink = run_dir.joinpath('log')
- target = tmp_path.joinpath('sym-log', 'cylc-run', reg, 'meow.txt')
+ target = tmp_path.joinpath('sym-log', 'cylc-run', id_, 'meow.txt')
target.parent.mkdir(parents=True)
target.touch()
symlink.symlink_to(target)
with pytest.raises(WorkflowFilesError) as exc:
- cylc_clean.clean(reg, run_dir)
+ cylc_clean.clean(id_, run_dir)
assert "Invalid symlink at" in str(exc.value)
assert symlink.exists() is True
@@ -465,13 +467,13 @@ def test_clean__bad_symlink_dir_wrong_form(
def test_clean__rm_dir_not_file(pattern: str, tmp_run_dir: Callable):
"""Test clean() does not remove a file when the rm_dir glob pattern would
match a dir only."""
- reg = 'foo'
- run_dir: Path = tmp_run_dir(reg)
+ id_ = 'foo'
+ run_dir: Path = tmp_run_dir(id_)
a_file = run_dir.joinpath('thing')
a_file.touch()
rm_dirs = parse_rm_dirs([pattern])
- cylc_clean.clean(reg, run_dir, rm_dirs)
+ cylc_clean.clean(id_, run_dir, rm_dirs)
assert a_file.exists()
@@ -483,7 +485,7 @@ def filetree_for_testing_cylc_clean(tmp_path: Path):
See tests/unit/filetree.py
Args:
- reg: Workflow name.
+ id_: Workflow name.
initial_filetree: The filetree before cleaning.
filetree_left_behind: The filetree that is expected to be left behind
after cleaning, excluding the 'you-shall-not-pass/' directory,
@@ -495,7 +497,7 @@ def filetree_for_testing_cylc_clean(tmp_path: Path):
files_not_to_delete: List of files that are not expected to be deleted.
"""
def _filetree_for_testing_cylc_clean(
- reg: str,
+ id_: str,
initial_filetree: Dict[str, Any],
filetree_left_behind: Dict[str, Any]
) -> Tuple[Path, List[str], List[str]]:
@@ -512,7 +514,7 @@ def _filetree_for_testing_cylc_clean(
files_not_to_delete
)
)
- run_dir = tmp_path / 'cylc-run' / reg
+ run_dir = tmp_path / 'cylc-run' / id_
return run_dir, files_to_delete, files_not_to_delete
return _filetree_for_testing_cylc_clean
@@ -753,16 +755,16 @@ def test_clean__targeted(
# --- Setup ---
caplog.set_level(logging.DEBUG, CYLC_LOG)
tmp_run_dir()
- reg = 'foo/bar'
+ id_ = 'foo/bar'
run_dir: Path
files_to_delete: List[str]
files_not_to_delete: List[str]
run_dir, files_to_delete, files_not_to_delete = (
filetree_for_testing_cylc_clean(
- reg, initial_filetree, filetree_left_behind)
+ id_, initial_filetree, filetree_left_behind)
)
# --- Test ---
- cylc_clean.clean(reg, run_dir, rm_dirs)
+ cylc_clean.clean(id_, run_dir, rm_dirs)
for file in files_not_to_delete:
assert os.path.exists(file) is True
for file in files_to_delete:
@@ -920,7 +922,7 @@ def test_remote_clean(
# Remove randomness:
monkeymock('cylc.flow.clean.shuffle')
- def mocked_remote_clean_cmd_side_effect(reg, platform, rm_dirs, timeout):
+ def mocked_remote_clean_cmd_side_effect(id_, platform, timeout, rm_dirs):
proc_ret_code = 0
if failed_platforms and platform['name'] in failed_platforms:
proc_ret_code = failed_platforms[platform['name']]
@@ -936,23 +938,25 @@ def mocked_remote_clean_cmd_side_effect(reg, platform, rm_dirs, timeout):
side_effect=mocked_remote_clean_cmd_side_effect)
rm_dirs = ["whatever"]
# ----- Test -----
- reg = 'foo'
+ id_ = 'foo'
platform_names = (
"This arg bypassed as we provide the install targets map in the test")
if exc_expected:
with pytest.raises(CylcError) as exc:
cylc_clean.remote_clean(
- reg, platform_names, rm_dirs, timeout='irrelevant')
+ id_, platform_names, timeout='irrelevant', rm_dirs=rm_dirs
+ )
assert "Remote clean failed" in str(exc.value)
else:
cylc_clean.remote_clean(
- reg, platform_names, rm_dirs, timeout='irrelevant')
+ id_, platform_names, timeout='irrelevant', rm_dirs=rm_dirs
+ )
for msg in expected_err_msgs:
assert log_filter(caplog, level=logging.ERROR, contains=msg)
if expected_platforms:
for p_name in expected_platforms:
mocked_remote_clean_cmd.assert_any_call(
- reg, PLATFORMS[p_name], rm_dirs, 'irrelevant')
+ id_, PLATFORMS[p_name], rm_dirs, 'irrelevant')
else:
mocked_remote_clean_cmd.assert_not_called()
if failed_platforms:
@@ -960,6 +964,36 @@ def mocked_remote_clean_cmd_side_effect(reg, platform, rm_dirs, timeout):
assert f"{p_name} - {PlatformError.MSG_TIDY}" in caplog.text
+def test_remote_clean__timeout(
+ monkeymock: MonkeyMock,
+ monkeypatch: pytest.MonkeyPatch,
+ caplog: pytest.LogCaptureFixture,
+):
+ """Test remote_clean() gives a sensible error message for return code 124.
+ """
+ caplog.set_level(logging.ERROR, CYLC_LOG)
+ monkeymock(
+ 'cylc.flow.clean._remote_clean_cmd',
+ spec=_remote_clean_cmd,
+ return_value=mock.Mock(
+ spec=Popen, poll=lambda: 124, communicate=lambda: ('', '')
+ )
+ )
+ monkeypatch.setattr(
+ 'cylc.flow.clean.get_install_target_to_platforms_map',
+ lambda *a, **k: {'picard': [PLATFORMS['stargazer']]}
+ )
+
+ with pytest.raises(CylcError):
+ cylc_clean.remote_clean(
+ 'blah', platform_names=['blah'], timeout='blah'
+ )
+ assert "cylc clean timed out" in caplog.text
+ # No need to log the remote clean cmd etc. for timeout
+ assert "ssh" not in caplog.text.lower()
+ assert "stderr" not in caplog.text.lower()
+
+
@pytest.mark.parametrize(
'rm_dirs, expected_args',
[
@@ -980,7 +1014,7 @@ def test_remote_clean_cmd(
expected_args: Expected CLI arguments of the cylc clean command that
gets constructed.
"""
- reg = 'jean/luc/picard'
+ id_ = 'jean/luc/picard'
platform = {
'name': 'enterprise',
'install target': 'mars',
@@ -991,30 +1025,32 @@ def test_remote_clean_cmd(
'cylc.flow.clean.construct_ssh_cmd', return_value=['blah'])
monkeymock('cylc.flow.clean.Popen')
- cylc_clean._remote_clean_cmd(reg, platform, rm_dirs, timeout='dunno')
- args, kwargs = mock_construct_ssh_cmd.call_args
+ cylc_clean._remote_clean_cmd(id_, platform, rm_dirs, timeout='dunno')
+ args, _kwargs = mock_construct_ssh_cmd.call_args
constructed_cmd = args[0]
- assert constructed_cmd == ['clean', '--local-only', reg, *expected_args]
+ assert constructed_cmd == [
+ 'clean', '--local-only', '--no-scan', id_, *expected_args
+ ]
def test_clean_top_level(tmp_run_dir: Callable):
"""Test that cleaning last remaining run dir inside a workflow dir removes
the top level dir if it's empty (excluding _cylc-install)."""
# Setup
- reg = 'blue/planet/run1'
- run_dir: Path = tmp_run_dir(reg, installed=True, named=True)
+ id_ = 'blue/planet/run1'
+ run_dir: Path = tmp_run_dir(id_, installed=True, named=True)
cylc_install_dir = run_dir.parent / WorkflowFiles.Install.DIRNAME
assert cylc_install_dir.is_dir()
runN_symlink = run_dir.parent / WorkflowFiles.RUN_N
assert runN_symlink.exists()
# Test
- clean(reg, run_dir)
+ clean(id_, run_dir)
assert not run_dir.parent.parent.exists()
# Now check that if the top level dir is not empty, it doesn't get removed
- run_dir: Path = tmp_run_dir(reg, installed=True, named=True)
+ run_dir: Path = tmp_run_dir(id_, installed=True, named=True)
jellyfish_file = (run_dir.parent / 'jellyfish.txt')
jellyfish_file.touch()
- clean(reg, run_dir)
+ clean(id_, run_dir)
assert cylc_install_dir.is_dir()
assert jellyfish_file.exists()
@@ -1088,10 +1124,10 @@ def test_glob_in_run_dir(
"""
# Setup
cylc_run_dir: Path = tmp_run_dir()
- reg = 'foo/bar'
- run_dir = cylc_run_dir / reg
+ id_ = 'foo/bar'
+ run_dir = cylc_run_dir / id_
create_filetree(filetree, tmp_path, tmp_path)
- symlink_dirs = [run_dir / i for i in get_symlink_dirs(reg, run_dir)]
+ symlink_dirs = [run_dir / i for i in get_symlink_dirs(id_, run_dir)]
expected = [tmp_path / i for i in expected_matches]
# Test
assert glob_in_run_dir(run_dir, pattern, symlink_dirs) == expected
diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py
index 149c4b318a2..bb55cbf295e 100644
--- a/tests/unit/test_config.py
+++ b/tests/unit/test_config.py
@@ -14,7 +14,9 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
+from copy import deepcopy
import os
+import sys
from optparse import Values
from typing import Any, Callable, Dict, List, Optional, Tuple, Type
from pathlib import Path
@@ -34,14 +36,16 @@
WorkflowConfigError,
XtriggerConfigError,
)
+from cylc.flow.parsec.exceptions import Jinja2Error, EmPyError
from cylc.flow.scheduler_cli import RunOptions
from cylc.flow.scripts.validate import ValidateOptions
+from cylc.flow.simulation import configure_sim_modes
from cylc.flow.workflow_files import WorkflowFiles
from cylc.flow.wallclock import get_utc_mode, set_utc_mode
from cylc.flow.xtrigger_mgr import XtriggerManager
from cylc.flow.task_outputs import (
TASK_OUTPUT_SUBMITTED,
- TASK_OUTPUT_SUCCEEDED
+ TASK_OUTPUT_SUCCEEDED,
)
from cylc.flow.cycling.iso8601 import ISO8601Point
@@ -54,13 +58,13 @@ def _tmp_flow_config(tmp_run_dir: Callable):
"""Create a temporary flow config file for use in init'ing WorkflowConfig.
Args:
- reg: Workflow name.
+ id_: Workflow name.
config: The flow file content.
Returns the path to the flow file.
"""
- def __tmp_flow_config(reg: str, config: str) -> Path:
- run_dir: Path = tmp_run_dir(reg)
+ def __tmp_flow_config(id_: str, config: str) -> Path:
+ run_dir: Path = tmp_run_dir(id_)
flow_file = run_dir / WorkflowFiles.FLOW_FILE
flow_file.write_text(config)
return flow_file
@@ -239,8 +243,8 @@ def test_family_inheritance_and_quotes(
hosts = localhost
'''
)
- reg = 'test'
- file_path = tmp_flow_config(reg, f'''
+ id_ = 'test'
+ file_path = tmp_flow_config(id_, f'''
[scheduler]
allow implicit tasks = True
[task parameters]
@@ -259,7 +263,7 @@ def test_family_inheritance_and_quotes(
inherit = 'MAINFAM', {fam_txt}
''')
config = WorkflowConfig(
- reg, file_path, template_vars={}, options=Values()
+ id_, file_path, template_vars={}, options=Values()
)
assert ('goodbye_0_major1_minor10' in
config.runtime['descendants']['MAINFAM_major1_minor10'])
@@ -341,6 +345,16 @@ def test_family_inheritance_and_quotes(
(WorkflowConfigError, "does not meet the constraints"),
id="Violated constraints"
),
+ pytest.param(
+ ISO8601_CYCLING_TYPE,
+ {
+ 'initial cycle point': 'a',
+ },
+ None,
+ None,
+ (WorkflowConfigError, 'Invalid ISO 8601 date representation: a'),
+ id="invalid"
+ ),
]
)
def test_process_icp(
@@ -780,8 +794,8 @@ def test_stopcp_after_fcp(
"""Test that setting a stop after cycle point that is beyond the final
cycle point is handled correctly."""
caplog.set_level(logging.WARNING, CYLC_LOG)
- reg = 'cassini'
- flow_file: Path = tmp_flow_config(reg, f"""
+ id_ = 'cassini'
+ flow_file: Path = tmp_flow_config(id_, f"""
[scheduler]
allow implicit tasks = True
[scheduling]
@@ -791,7 +805,7 @@ def test_stopcp_after_fcp(
[[graph]]
P1Y = huygens
""")
- cfg = WorkflowConfig(reg, flow_file, options=RunOptions(**opts))
+ cfg = WorkflowConfig(id_, flow_file, options=RunOptions(**opts))
msg = "will have no effect as it is after the final cycle point"
if warning_expected:
assert msg in caplog.text
@@ -992,8 +1006,8 @@ def _test(cp_tz, utc_mode, expected, expected_warnings=0):
def test_rsync_includes_will_not_accept_sub_directories(tmp_flow_config):
- reg = 'rsynctest'
- flow_file = tmp_flow_config(reg, """
+ id_ = 'rsynctest'
+ flow_file = tmp_flow_config(id_, """
[scheduling]
initial cycle point = 2020-01-01
[[dependencies]]
@@ -1004,15 +1018,86 @@ def test_rsync_includes_will_not_accept_sub_directories(tmp_flow_config):
with pytest.raises(WorkflowConfigError) as exc:
WorkflowConfig(
- workflow=reg, fpath=flow_file, options=Values()
+ workflow=id_, fpath=flow_file, options=Values()
)
assert "Directories can only be from the top level" in str(exc.value)
+@pytest.mark.parametrize(
+ 'cylc_var, expected_err',
+ [
+ ["CYLC_WORKFLOW_NAME", None],
+ ["CYLC_BEEF_WELLINGTON", (Jinja2Error, "is undefined")],
+ ]
+)
+def test_jinja2_cylc_vars(tmp_flow_config, cylc_var, expected_err):
+ """Defined CYLC_ variables should be available to Jinja2 during parsing.
+
+ This test is not located in the jinja2_support unit test module because
+ CYLC_ variables are only defined during workflow config parsing.
+ """
+ reg = 'nodule'
+ flow_file = tmp_flow_config(reg, """#!Jinja2
+ # {{""" + cylc_var + """}}
+ [scheduler]
+ allow implicit tasks = True
+ [scheduling]
+ [[graph]]
+ R1 = foo
+ """)
+ if expected_err is None:
+ WorkflowConfig(workflow=reg, fpath=flow_file, options=Values())
+ else:
+ with pytest.raises(expected_err[0]) as exc:
+ WorkflowConfig(workflow=reg, fpath=flow_file, options=Values())
+ assert expected_err[1] in str(exc)
+
+
+@pytest.mark.parametrize(
+ 'cylc_var, expected_err',
+ [
+ ["CYLC_WORKFLOW_NAME", None],
+ ["CYLC_BEEF_WELLINGTON", (EmPyError, "is not defined")],
+ ]
+)
+def test_empy_cylc_vars(tmp_flow_config, cylc_var, expected_err):
+ """Defined CYLC_ variables should be available to empy during parsing.
+
+ This test is not located in the empy_support unit test module because
+ CYLC_ variables are only defined during workflow config parsing.
+ """
+ reg = 'nodule'
+ flow_file = tmp_flow_config(reg, """#!empy
+ # @(""" + cylc_var + """)
+ [scheduler]
+ allow implicit tasks = True
+ [scheduling]
+ [[graph]]
+ R1 = foo
+ """)
+
+ # empy replaces sys.stdout with a "proxy". And pytest needs it for capture?
+ # (clue: "pytest --capture=no" avoids the error)
+ stdout = sys.stdout
+ sys.stdout._testProxy = lambda: ''
+ sys.stdout.pop = lambda _: ''
+ sys.stdout.push = lambda _: ''
+ sys.stdout.clear = lambda _: ''
+
+ if expected_err is None:
+ WorkflowConfig(workflow=reg, fpath=flow_file, options=Values())
+ else:
+ with pytest.raises(expected_err[0]) as exc:
+ WorkflowConfig(workflow=reg, fpath=flow_file, options=Values())
+ assert expected_err[1] in str(exc)
+
+ sys.stdout = stdout
+
+
def test_valid_rsync_includes_returns_correct_list(tmp_flow_config):
"""Test that the rsync includes in the correct """
- reg = 'rsynctest'
- flow_file = tmp_flow_config(reg, """
+ id_ = 'rsynctest'
+ flow_file = tmp_flow_config(id_, """
[scheduling]
initial cycle point = 2020-01-01
[[dependencies]]
@@ -1023,7 +1108,7 @@ def test_valid_rsync_includes_returns_correct_list(tmp_flow_config):
""")
config = WorkflowConfig(
- workflow=reg, fpath=flow_file, options=Values()
+ workflow=id_, fpath=flow_file, options=Values()
)
rsync_includes = WorkflowConfig.get_validated_rsync_includes(config)
@@ -1078,8 +1163,8 @@ def test_check_circular(opt, monkeypatch, caplog, tmp_flow_config):
if opt:
setattr(options, opt, True)
- reg = 'circular'
- flow_file = tmp_flow_config(reg, """
+ id_ = 'circular'
+ flow_file = tmp_flow_config(id_, """
[scheduling]
cycling mode = integer
[[graph]]
@@ -1091,7 +1176,7 @@ def test_check_circular(opt, monkeypatch, caplog, tmp_flow_config):
def WorkflowConfig__assert_err_raised():
with pytest.raises(WorkflowConfigError) as exc:
- WorkflowConfig(workflow=reg, fpath=flow_file, options=options)
+ WorkflowConfig(workflow=id_, fpath=flow_file, options=options)
assert "circular edges detected" in str(exc.value)
# ----- The actual test -----
@@ -1110,8 +1195,8 @@ def WorkflowConfig__assert_err_raised():
def test_undefined_custom_output(tmp_flow_config: Callable):
"""Test error on undefined custom output referenced in graph."""
- reg = 'custom_out1'
- flow_file = tmp_flow_config(reg, """
+ id_ = 'custom_out1'
+ flow_file = tmp_flow_config(id_, """
[scheduling]
[[graph]]
R1 = "foo:x => bar"
@@ -1120,14 +1205,14 @@ def test_undefined_custom_output(tmp_flow_config: Callable):
""")
with pytest.raises(WorkflowConfigError) as cm:
- WorkflowConfig(workflow=reg, fpath=flow_file, options=Values())
+ WorkflowConfig(workflow=id_, fpath=flow_file, options=Values())
assert "Undefined custom output" in str(cm.value)
def test_invalid_custom_output_msg(tmp_flow_config: Callable):
"""Test invalid output message (colon not allowed)."""
- reg = 'invalid_output'
- flow_file = tmp_flow_config(reg, """
+ id_ = 'invalid_output'
+ flow_file = tmp_flow_config(id_, """
[scheduling]
[[graph]]
R1 = "foo:x => bar"
@@ -1140,9 +1225,9 @@ def test_invalid_custom_output_msg(tmp_flow_config: Callable):
with pytest.raises(WorkflowConfigError) as cm:
WorkflowConfig(
- workflow=reg, fpath=flow_file, options=Values())
+ workflow=id_, fpath=flow_file, options=Values())
assert (
- 'Invalid message trigger "[runtime][foo][outputs]x = '
+ 'Invalid task message "[runtime][foo][outputs]x = '
'the quick: brown fox"'
) in str(cm.value)
@@ -1155,8 +1240,8 @@ def test_c7_back_compat_optional_outputs(tmp_flow_config, monkeypatch):
"""
monkeypatch.setattr('cylc.flow.flags.cylc7_back_compat', True)
- reg = 'custom_out2'
- flow_file = tmp_flow_config(reg, '''
+ id_ = 'custom_out2'
+ flow_file = tmp_flow_config(id_, '''
[scheduling]
[[graph]]
R1 = """
@@ -1171,7 +1256,7 @@ def test_c7_back_compat_optional_outputs(tmp_flow_config, monkeypatch):
x = x
''')
- cfg = WorkflowConfig(workflow=reg, fpath=flow_file, options=None)
+ cfg = WorkflowConfig(workflow=id_, fpath=flow_file, options=None)
for taskdef in cfg.taskdefs.values():
for output, (_, required) in taskdef.outputs.items():
@@ -1191,8 +1276,8 @@ def test_c7_back_compat_optional_outputs(tmp_flow_config, monkeypatch):
)
def test_implicit_success_required(tmp_flow_config, graph):
"""Check foo:succeed is required if success/fail not used in the graph."""
- reg = 'blargh'
- flow_file = tmp_flow_config(reg, f"""
+ id_ = 'blargh'
+ flow_file = tmp_flow_config(id_, f"""
[scheduling]
[[graph]]
R1 = {graph}
@@ -1202,7 +1287,7 @@ def test_implicit_success_required(tmp_flow_config, graph):
[[[outputs]]]
x = "the quick brown fox"
""")
- cfg = WorkflowConfig(workflow=reg, fpath=flow_file, options=None)
+ cfg = WorkflowConfig(workflow=id_, fpath=flow_file, options=None)
assert cfg.taskdefs['foo'].outputs[TASK_OUTPUT_SUCCEEDED][1]
@@ -1215,8 +1300,8 @@ def test_implicit_success_required(tmp_flow_config, graph):
)
def test_success_after_optional_submit(tmp_flow_config, graph):
"""Check foo:succeed is not required if foo:submit is optional."""
- reg = 'blargh'
- flow_file = tmp_flow_config(reg, f"""
+ id_ = 'blargh'
+ flow_file = tmp_flow_config(id_, f"""
[scheduling]
[[graph]]
R1 = {graph}
@@ -1224,7 +1309,7 @@ def test_success_after_optional_submit(tmp_flow_config, graph):
[[bar]]
[[foo]]
""")
- cfg = WorkflowConfig(workflow=reg, fpath=flow_file, options=None)
+ cfg = WorkflowConfig(workflow=id_, fpath=flow_file, options=None)
assert not cfg.taskdefs['foo'].outputs[TASK_OUTPUT_SUCCEEDED][1]
@@ -1281,8 +1366,8 @@ def test_implicit_tasks(
implicit tasks in the err msg.
"""
# Setup
- reg = 'rincewind'
- flow_file: Path = tmp_flow_config(reg, f"""
+ id_ = 'rincewind'
+ flow_file: Path = tmp_flow_config(id_, f"""
[scheduler]
{
f'allow implicit tasks = {allow_implicit_tasks}'
@@ -1302,7 +1387,7 @@ def test_implicit_tasks(
expected_exc = WorkflowConfigError
extra_msg_expected &= (allow_implicit_tasks is None)
# Test
- args: dict = {'workflow': reg, 'fpath': flow_file, 'options': None}
+ args: dict = {'workflow': id_, 'fpath': flow_file, 'options': None}
expected_msg = r"implicit tasks detected.*"
if expected_exc:
with pytest.raises(expected_exc, match=expected_msg) as excinfo:
@@ -1385,8 +1470,8 @@ def test_zero_interval(
):
"""Test that a zero-duration recurrence with >1 repetition gets an
appropriate warning."""
- reg = 'ordinary'
- flow_file: Path = tmp_flow_config(reg, f"""
+ id_ = 'ordinary'
+ flow_file: Path = tmp_flow_config(id_, f"""
[scheduler]
UTC mode = True
allow implicit tasks = True
@@ -1396,7 +1481,7 @@ def test_zero_interval(
[[graph]]
{recurrence} = slidescape36
""")
- WorkflowConfig(reg, flow_file, options=opts)
+ WorkflowConfig(id_, flow_file, options=opts)
logged = log_filter(
caplog,
level=logging.WARNING,
@@ -1433,8 +1518,8 @@ def test_chain_expr(
Note the order matters when "nominal" units (years, months) are used.
"""
- reg = 'osgiliath'
- flow_file: Path = tmp_flow_config(reg, f"""
+ id_ = 'osgiliath'
+ flow_file: Path = tmp_flow_config(id_, f"""
[scheduler]
UTC mode = True
allow implicit tasks = True
@@ -1444,7 +1529,7 @@ def test_chain_expr(
[[graph]]
P1D = faramir
""")
- cfg = WorkflowConfig(reg, flow_file, options=ValidateOptions())
+ cfg = WorkflowConfig(id_, flow_file, options=ValidateOptions())
assert cfg.final_point == ISO8601Point(expected_fcp).standardise()
@@ -1497,8 +1582,8 @@ def test_check_for_owner(runtime_cfg):
@pytest.fixture(scope='module')
def awe_config(mod_tmp_flow_config: Callable) -> WorkflowConfig:
"""Return a workflow config object."""
- reg = 'awe'
- flow_file = mod_tmp_flow_config(reg, '''
+ id_ = 'awe'
+ flow_file = mod_tmp_flow_config(id_, '''
[scheduling]
cycling mode = integer
[[graph]]
@@ -1512,7 +1597,7 @@ def awe_config(mod_tmp_flow_config: Callable) -> WorkflowConfig:
inherit = MOON
''')
return WorkflowConfig(
- workflow=reg, fpath=flow_file, options=ValidateOptions()
+ workflow=id_, fpath=flow_file, options=ValidateOptions()
)
@@ -1658,3 +1743,31 @@ def test_cylc_env_at_parsing(
assert var in cylc_env
else:
assert var not in cylc_env
+
+
+def test_configure_sim_mode(caplog):
+ job_section = {}
+ sim_section = {
+ 'speedup factor': '',
+ 'default run length': 'PT10S',
+ 'time limit buffer': 'PT0S',
+ 'fail try 1 only': False,
+ 'fail cycle points': '',
+ }
+ rtconfig_1 = {
+ 'execution time limit': '',
+ 'simulation': sim_section,
+ 'job': job_section,
+ 'outputs': {},
+ }
+ rtconfig_2 = deepcopy(rtconfig_1)
+ rtconfig_2['simulation']['default run length'] = 'PT2S'
+
+ taskdefs = [
+ SimpleNamespace(rtconfig=rtconfig_1),
+ SimpleNamespace(rtconfig=rtconfig_2),
+ ]
+ configure_sim_modes(taskdefs, 'simulation')
+ results = [
+ i.rtconfig['simulation']['simulated run length'] for i in taskdefs]
+ assert results == [10.0, 2.0]
diff --git a/tests/unit/test_config_upgrader.py b/tests/unit/test_config_upgrader.py
index b8261150edd..22e833a3449 100644
--- a/tests/unit/test_config_upgrader.py
+++ b/tests/unit/test_config_upgrader.py
@@ -108,7 +108,7 @@ def _cfg(dic):
@pytest.mark.parametrize(
'macp, rlim',
- [(16, 'P16'),
+ [(16, 'P15'),
('', '')]
)
def test_upgrade_max_active_cycle_points(macp, rlim):
diff --git a/tests/unit/test_graph_parser.py b/tests/unit/test_graph_parser.py
index 97b7fb45483..ddd443a3597 100644
--- a/tests/unit/test_graph_parser.py
+++ b/tests/unit/test_graph_parser.py
@@ -16,6 +16,7 @@
"""Unit tests for the GraphParser."""
import logging
+from typing import Dict, List
import pytest
from itertools import product
from pytest import param
@@ -86,45 +87,59 @@ def test_graph_syntax_errors_2(seq, graph, expected_err):
@pytest.mark.parametrize(
'graph, expected_err',
[
- [
+ (
"a b => c",
"Bad graph node format"
- ],
- [
+ ),
+ (
+ "a => b c",
+ "Bad graph node format"
+ ),
+ (
"!foo => bar",
"Suicide markers must be on the right of a trigger:"
- ],
- [
+ ),
+ (
"( foo & bar => baz",
- "Mismatched parentheses in:"
- ],
- [
+ 'Mismatched parentheses in: "(foo&bar"'
+ ),
+ (
+ "a => b & c)",
+ 'Mismatched parentheses in: "b&c)"'
+ ),
+ (
+ "(a => b & c)",
+ 'Mismatched parentheses in: "(a"'
+ ),
+ (
+ "(a => b[+P1]",
+ 'Mismatched parentheses in: "(a"'
+ ),
+ (
"""(a | b & c) => d
foo => bar
(a | b & c) => !d""",
"can't trigger both d and !d"
- ],
- [
+ ),
+ (
"a => b | c",
"Illegal OR on right side"
- ],
- [
+ ),
+ (
"foo && bar => baz",
"The graph AND operator is '&'"
- ],
- [
+ ),
+ (
"foo || bar => baz",
"The graph OR operator is '|'"
- ],
+ ),
]
)
def test_graph_syntax_errors(graph, expected_err):
"""Test various graph syntax errors."""
with pytest.raises(GraphParseError) as cm:
GraphParser().parse_graph(graph)
- assert (
- expected_err in str(cm.value)
- )
+ assert expected_err in str(cm.value)
def test_parse_graph_simple():
@@ -845,3 +860,39 @@ def test_fail_family_triggers_on_tasks(ftrig):
"family trigger on non-family namespace"
)
)
+
+
+@pytest.mark.parametrize(
+ 'graph, expected_triggers',
+ [
+ param(
+ 'a => b & c',
+ {'a': [''], 'b': ['a:succeeded'], 'c': ['a:succeeded']},
+ id="simple"
+ ),
+ param(
+ 'a => (b & c)',
+ {'a': [''], 'b': ['a:succeeded'], 'c': ['a:succeeded']},
+ id="simple w/ parentheses"
+ ),
+ param(
+ 'a => (b & (c & d))',
+ {
+ 'a': [''],
+ 'b': ['a:succeeded'],
+ 'c': ['a:succeeded'],
+ 'd': ['a:succeeded'],
+ },
+ id="more parentheses"
+ ),
+ ]
+)
+def test_RHS_AND(graph: str, expected_triggers: Dict[str, List[str]]):
+ """Test '&' operator on right hand side of trigger expression."""
+ gp = GraphParser()
+ gp.parse_graph(graph)
+ triggers = {
+ task: list(trigs.keys())
+ for task, trigs in gp.triggers.items()
+ }
+ assert triggers == expected_triggers
diff --git a/tests/unit/test_id_cli.py b/tests/unit/test_id_cli.py
index 4f1099b164b..9642a3c9874 100644
--- a/tests/unit/test_id_cli.py
+++ b/tests/unit/test_id_cli.py
@@ -35,9 +35,8 @@
@pytest.fixture
-def mock_exists(mocker):
- mock_exists = mocker.patch('pathlib.Path.exists')
- mock_exists.return_value = True
+def mock_exists(monkeypatch: pytest.MonkeyPatch):
+ monkeypatch.setattr('pathlib.Path.exists', lambda *a, **k: True)
@pytest.fixture(scope='module')
@@ -570,7 +569,7 @@ async def _scan():
# something that looks like scan but doesn't do anything
yield
- monkeypatch.setattr('cylc.flow.id_cli.scan', _scan)
+ monkeypatch.setattr('cylc.flow.network.scan.scan', _scan)
async def test_expand_workflow_tokens_impl_selector(no_scan):
diff --git a/tests/unit/test_indep_task_queues.py b/tests/unit/test_indep_task_queues.py
index a0a1894cece..d144f58eecf 100644
--- a/tests/unit/test_indep_task_queues.py
+++ b/tests/unit/test_indep_task_queues.py
@@ -21,8 +21,8 @@
import pytest
+from cylc.flow.task_proxy import TaskProxy
from cylc.flow.task_queues.independent import IndepQueueManager
-from cylc.flow.task_state import TASK_STATUS_PREPARING
MEMBERS = {"a", "b", "c", "d", "e", "f"}
@@ -61,9 +61,7 @@
@pytest.mark.parametrize(
- "active,"
- "expected_released,"
- "expected_foo_groups",
+ "active, expected_released, expected_foo_groups",
[
(
Counter(["b1", "b2", "s1", "o1"]),
@@ -73,28 +71,24 @@
]
)
def test_queue_and_release(
- active,
- expected_released,
- expected_foo_groups):
+ active,
+ expected_released,
+ expected_foo_groups
+):
"""Test task queue and release."""
# configure the queue
queue_mgr = IndepQueueManager(QCONFIG, ALL_TASK_NAMES, DESCENDANTS)
# add newly ready tasks to the queue
for name in READY_TASK_NAMES:
- itask = Mock()
+ itask = Mock(spec=TaskProxy)
itask.tdef.name = name
itask.state.is_held = False
queue_mgr.push_task(itask)
# release tasks, given current active task counter
released = queue_mgr.release_tasks(active)
- assert sorted([r.tdef.name for r in released]) == sorted(expected_released)
-
- # check released tasks change state to "preparing", and not is_queued
- for r in released:
- assert r.state.reset.called_with(TASK_STATUS_PREPARING)
- assert r.state.reset.called_with(is_queued=False)
+ assert sorted(r.tdef.name for r in released) == sorted(expected_released)
# check that adopted orphans end up in the default queue
orphans = ["orphan1", "orphan2"]
diff --git a/tests/unit/test_install.py b/tests/unit/test_install.py
index e3dc8c876da..1b8ad505d38 100644
--- a/tests/unit/test_install.py
+++ b/tests/unit/test_install.py
@@ -136,8 +136,8 @@ def test_install_workflow__symlink_target_exists(
):
"""Test that you can't install workflow when run dir symlink dir target
already exists."""
- reg = 'smeagol'
- src_dir: Path = tmp_src_dir(reg)
+ id_ = 'smeagol'
+ src_dir: Path = tmp_src_dir(id_)
tmp_run_dir()
sym_run = tmp_path / 'sym-run'
sym_log = tmp_path / 'sym-log'
@@ -153,13 +153,13 @@ def test_install_workflow__symlink_target_exists(
)
msg = "Symlink dir target already exists: .*{}"
# Test:
- (sym_run / 'cylc-run' / reg / 'run1').mkdir(parents=True)
+ (sym_run / 'cylc-run' / id_ / 'run1').mkdir(parents=True)
with pytest.raises(WorkflowFilesError, match=msg.format(sym_run)):
install_workflow(src_dir)
shutil.rmtree(sym_run)
(
- sym_log / 'cylc-run' / reg / 'run1' / WorkflowFiles.LogDir.DIRNAME
+ sym_log / 'cylc-run' / id_ / 'run1' / WorkflowFiles.LogDir.DIRNAME
).mkdir(parents=True)
with pytest.raises(WorkflowFilesError, match=msg.format(sym_log)):
install_workflow(src_dir)
diff --git a/tests/unit/test_job_file.py b/tests/unit/test_job_file.py
index 92be2d167c1..9dbfa58f3a1 100644
--- a/tests/unit/test_job_file.py
+++ b/tests/unit/test_job_file.py
@@ -31,7 +31,6 @@
)
from cylc.flow.job_file import (
JobFileWriter,
- MAX_CYLC_TASK_DEPENDENCIES_LEN,
)
from cylc.flow.platforms import platform_from_name
@@ -398,7 +397,6 @@ def test_write_task_environment():
'export CYLC_TASK_COMMS_METHOD=ssh\n '
'export CYLC_TASK_JOB="1/moo/01"\n export '
'CYLC_TASK_NAMESPACE_HIERARCHY="baa moo"\n export '
- 'CYLC_TASK_DEPENDENCIES="moo neigh quack"\n export '
'CYLC_TASK_TRY_NUMBER=1\n export '
'CYLC_TASK_FLOW_NUMBERS=1\n export '
'CYLC_TASK_PARAM_duck="quack"\n export '
@@ -537,46 +535,3 @@ def test_homeless_platform(fixture_get_platform):
if 'HOME' in job_sh_txt:
raise Exception('$HOME found in job.sh\n{job_sh_txt}')
-
-def test_cylc_task_dependencies_length():
- f"""Test CYLC_TASK_DEPENDENCIES variable toggle.
-
- The CYLC_TASK_DEPENDENCIES variriable should only be exported if there are
- { MAX_CYLC_TASK_DEPENDENCIES_LEN } or fewer dependencies.
-
- See: https://github.com/cylc/cylc-flow/issues/5551
-
- """
- job_conf = {
- 'platform': {'communication method': 'zmq'},
- 'job_d': 'a/b/c',
- 'namespace_hierarchy': ['a', 'b'],
- # the maximum permitted number of dependencies before the environment
- # variable is omitted
- 'dependencies': ['a'] * (MAX_CYLC_TASK_DEPENDENCIES_LEN),
- 'try_num': 1,
- 'flow_nums': {1},
- 'param_var': {},
- 'work_d': 'b/c/d',
- }
-
- # write the job environment
- with io.StringIO() as fake_file:
- JobFileWriter()._write_task_environment(fake_file, job_conf)
- output = fake_file.getvalue()
-
- # assert the env var is exported
- lines = [line.strip().split('=')[0] for line in output.splitlines()]
- assert 'export CYLC_TASK_DEPENDENCIES' in lines
-
- # add an extra dependency to push it over the limit
- job_conf['dependencies'] += ['b']
-
- # write the job environment
- with io.StringIO() as fake_file:
- JobFileWriter()._write_task_environment(fake_file, job_conf)
- output = fake_file.getvalue()
-
- # assert the env var is redacted
- lines = [line.strip().split('=')[0] for line in output.splitlines()]
- assert '# CYLC_TASK_DEPENDENCIES' in lines # var should be commented out
diff --git a/tests/unit/test_pathutil.py b/tests/unit/test_pathutil.py
index d42b26a2dbe..6ed9e9ec120 100644
--- a/tests/unit/test_pathutil.py
+++ b/tests/unit/test_pathutil.py
@@ -438,21 +438,21 @@ def test_remove_empty_parents(tmp_path: Path):
"""Test that _remove_empty_parents() doesn't remove parents containing a
sibling."""
# -- Setup --
- reg = 'foo/bar/baz/qux'
- path = tmp_path.joinpath(reg)
+ id_ = 'foo/bar/baz/qux'
+ path = tmp_path.joinpath(id_)
tmp_path.joinpath('foo/bar/baz').mkdir(parents=True)
# Note qux does not exist, but that shouldn't matter
sibling_reg = 'foo/darmok'
sibling_path = tmp_path.joinpath(sibling_reg)
sibling_path.mkdir()
# -- Test --
- remove_empty_parents(path, reg)
+ remove_empty_parents(path, id_)
assert tmp_path.joinpath('foo/bar').exists() is False
assert tmp_path.joinpath('foo').exists() is True
# Check it skips non-existent dirs, and stops at the right place too
tmp_path.joinpath('foo/bar').mkdir()
sibling_path.rmdir()
- remove_empty_parents(path, reg)
+ remove_empty_parents(path, id_)
assert tmp_path.joinpath('foo').exists() is False
assert tmp_path.exists() is True
diff --git a/tests/unit/test_pipe_poller.py b/tests/unit/test_pipe_poller.py
new file mode 100644
index 00000000000..a41e9635c6b
--- /dev/null
+++ b/tests/unit/test_pipe_poller.py
@@ -0,0 +1,33 @@
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+from subprocess import Popen, PIPE
+
+from cylc.flow.pipe_poller import pipe_poller
+
+
+def test_pipe_poller_str():
+ proc = Popen(['echo', 'Hello World!'], stdout=PIPE, text=True)
+ (stdout,) = pipe_poller(proc, proc.stdout)
+ assert proc.returncode == 0
+ assert stdout == 'Hello World!\n'
+
+
+def test_pipe_poller_bytes():
+ proc = Popen(['echo', 'Hello World!'], stdout=PIPE, text=False)
+ (stdout,) = pipe_poller(proc, proc.stdout)
+ assert proc.returncode == 0
+ assert stdout == b'Hello World!\n'
diff --git a/tests/unit/test_remote.py b/tests/unit/test_remote.py
index 7be01de2e20..5982fb746f1 100644
--- a/tests/unit/test_remote.py
+++ b/tests/unit/test_remote.py
@@ -15,7 +15,15 @@
# along with this program. If not, see .
"""Test the cylc.flow.remote module."""
-from cylc.flow.remote import run_cmd, construct_rsync_over_ssh_cmd
+import os
+from unittest import mock
+
+import pytest
+
+from cylc.flow.remote import (
+ run_cmd, construct_rsync_over_ssh_cmd, construct_ssh_cmd
+)
+import cylc.flow
def test_run_cmd_stdin_str():
@@ -86,3 +94,31 @@ def test_construct_rsync_over_ssh_cmd():
'/foo/',
'miklegard:/bar/',
]
+
+
+def test_construct_ssh_cmd_forward_env(monkeypatch: pytest.MonkeyPatch):
+ """ Test for 'ssh forward environment variables'
+ """
+ # Clear CYLC_* env vars as these will show up in the command
+ for env_var in os.environ:
+ if env_var.startswith('CYLC'):
+ monkeypatch.delenv(env_var)
+
+ host = 'example.com'
+ config = {
+ 'ssh command': 'ssh',
+ 'use login shell': None,
+ 'cylc path': None,
+ 'ssh forward environment variables': ['FOO', 'BAZ'],
+ }
+
+ # Variable isn't set, no change to command
+ expect = ['ssh', host, 'env', f'CYLC_VERSION={cylc.flow.__version__}', 'cylc', 'play']
+ cmd = construct_ssh_cmd(['play'], config, host)
+ assert cmd == expect
+
+ # Variable is set, appears in `env` list
+ monkeypatch.setenv('FOO', 'BAR')
+ expect = ['ssh', host, 'env', f'CYLC_VERSION={cylc.flow.__version__}', 'FOO=BAR', 'cylc', 'play']
+ cmd = construct_ssh_cmd(['play'], config, host)
+ assert cmd == expect
diff --git a/tests/unit/test_simulation.py b/tests/unit/test_simulation.py
new file mode 100644
index 00000000000..1c490f35c16
--- /dev/null
+++ b/tests/unit/test_simulation.py
@@ -0,0 +1,166 @@
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+"""Tests for utilities supporting simulation and skip modes
+"""
+import pytest
+from pytest import param
+
+from cylc.flow.cycling.integer import IntegerPoint
+from cylc.flow.cycling.iso8601 import ISO8601Point
+from cylc.flow.simulation import (
+ parse_fail_cycle_points,
+ build_dummy_script,
+ disable_platforms,
+ get_simulated_run_len,
+ sim_task_failed,
+)
+
+
+@pytest.mark.parametrize(
+ 'execution_time_limit, speedup_factor, default_run_length',
+ (
+ param(None, None, 'PT1H', id='default-run-length'),
+ param(None, 10, 'PT1H', id='speedup-factor-alone'),
+ param('PT1H', None, 'PT1H', id='execution-time-limit-alone'),
+ param('P1D', 24, 'PT1M', id='speed-up-and-execution-tl'),
+ )
+)
+def test_get_simulated_run_len(
+ execution_time_limit, speedup_factor, default_run_length
+):
+ """Test the logic of the presence or absence of config items.
+
+ Avoid testing the correct working of DurationParser.
+ """
+ rtc = {
+ 'execution time limit': execution_time_limit,
+ 'simulation': {
+ 'speedup factor': speedup_factor,
+ 'default run length': default_run_length
+ }
+ }
+ assert get_simulated_run_len(rtc) == 3600
+
+
+@pytest.mark.parametrize(
+ 'fail_one_time_only', (True, False)
+)
+def test_set_simulation_script(fail_one_time_only):
+ rtc = {
+ 'outputs': {'foo': '1', 'bar': '2'},
+ 'simulation': {
+ 'fail try 1 only': fail_one_time_only,
+ 'fail cycle points': '1',
+ }
+ }
+ result = build_dummy_script(rtc, 60)
+ assert result.split('\n') == [
+ 'sleep 60',
+ "cylc message '1'",
+ "cylc message '2'",
+ f"cylc__job__dummy_result {str(fail_one_time_only).lower()}"
+ " 1 || exit 1"
+ ]
+
+
+@pytest.mark.parametrize(
+ 'rtc, expect', (
+ ({'platform': 'skarloey'}, 'localhost'),
+ ({'remote': {'host': 'rheneas'}}, 'localhost'),
+ ({'job': {'batch system': 'loaf'}}, 'localhost'),
+ )
+)
+def test_disable_platforms(rtc, expect):
+ """A sampling of items FORBIDDEN_WITH_PLATFORMS are removed from a
+ config passed to this method.
+ """
+ disable_platforms(rtc)
+ assert rtc['platform'] == expect
+ subdicts = [v for v in rtc.values() if isinstance(v, dict)]
+ for subdict in subdicts:
+ for k, val in subdict.items():
+ if k != 'platform':
+ assert val is None
+
+
+def test_parse_fail_cycle_points(set_cycling_type):
+ before = ['2', '4']
+ set_cycling_type()
+ assert parse_fail_cycle_points(before) == [
+ IntegerPoint(i) for i in before
+ ]
+
+
+@pytest.mark.parametrize(
+ 'conf, point, try_, expect',
+ (
+ param(
+ {'fail cycle points': [], 'fail try 1 only': True},
+ ISO8601Point('1'),
+ 1,
+ False,
+ id='defaults'
+ ),
+ param(
+ {'fail cycle points': None, 'fail try 1 only': True},
+ ISO8601Point('1066'),
+ 1,
+ True,
+ id='fail-all'
+ ),
+ param(
+ {
+ 'fail cycle points': [
+ ISO8601Point('1066'), ISO8601Point('1067')],
+ 'fail try 1 only': True
+ },
+ ISO8601Point('1067'),
+ 1,
+ True,
+ id='point-in-failCP'
+ ),
+ param(
+ {
+ 'fail cycle points': [
+ ISO8601Point('1066'), ISO8601Point('1067')],
+ 'fail try 1 only': True
+ },
+ ISO8601Point('1000'),
+ 1,
+ False,
+ id='point-notin-failCP'
+ ),
+ param(
+ {'fail cycle points': None, 'fail try 1 only': True},
+ ISO8601Point('1066'),
+ 2,
+ False,
+ id='succeed-attempt2'
+ ),
+ param(
+ {'fail cycle points': None, 'fail try 1 only': False},
+ ISO8601Point('1066'),
+ 7,
+ True,
+ id='fail-attempt7'
+ ),
+ )
+)
+def test_sim_task_failed(
+ conf, point, try_, expect, set_cycling_type
+):
+ set_cycling_type('iso8601')
+ assert sim_task_failed(conf, point, try_) == expect
diff --git a/tests/unit/test_subprocpool.py b/tests/unit/test_subprocpool.py
index 7af9738afdf..981ca8e75f9 100644
--- a/tests/unit/test_subprocpool.py
+++ b/tests/unit/test_subprocpool.py
@@ -25,9 +25,12 @@
from types import SimpleNamespace
from cylc.flow import LOG
+from cylc.flow.id import Tokens
+from cylc.flow.cycling.iso8601 import ISO8601Point
from cylc.flow.task_events_mgr import TaskJobLogsRetrieveContext
from cylc.flow.subprocctx import SubProcContext
from cylc.flow.subprocpool import SubProcPool, _XTRIG_FUNCS, get_func
+from cylc.flow.task_proxy import TaskProxy
class TestSubProcPool(unittest.TestCase):
@@ -312,6 +315,32 @@ def test__run_command_exit_add_to_badhosts(mock_ctx):
assert badhosts == {'foo', 'bar', 'mouse'}
+def test__run_command_exit_add_to_badhosts_log(caplog, mock_ctx):
+ """It gets platform name from the callback args.
+ """
+ badhosts = {'foo', 'bar'}
+ SubProcPool._run_command_exit(
+ mock_ctx(cmd=['ssh']),
+ bad_hosts=badhosts,
+ callback=lambda x, t: print(str(x)),
+ callback_args=[TaskProxy(
+ Tokens('~u/w//c/t/2'),
+ SimpleNamespace(
+ name='t', dependencies={}, sequential='',
+ external_triggers=[], xtrig_labels={},
+ outputs={
+ 'submitted': [None, None], 'submit-failed': [None, None]
+ },
+ graph_children={}, rtconfig={'platform': 'foo'}
+
+ ),
+ ISO8601Point('1990')
+ )]
+ )
+ assert 'platform: foo' in caplog.records[0].message
+ assert badhosts == {'foo', 'bar', 'mouse'}
+
+
def test__run_command_exit_rsync_fails(mock_ctx):
"""It updates the list of badhosts
"""
diff --git a/tests/unit/test_task_proxy.py b/tests/unit/test_task_proxy.py
index 5369e70f124..98695ecd13f 100644
--- a/tests/unit/test_task_proxy.py
+++ b/tests/unit/test_task_proxy.py
@@ -60,9 +60,10 @@ def test_get_clock_trigger_time(
set_cycling_type(itask_point.TYPE)
mock_itask = Mock(
point=itask_point.standardise(),
- clock_trigger_time=None
+ clock_trigger_times={}
)
- assert TaskProxy.get_clock_trigger_time(mock_itask, offset_str) == expected
+ assert TaskProxy.get_clock_trigger_time(
+ mock_itask, mock_itask.point, offset_str) == expected
@pytest.mark.parametrize(
diff --git a/tests/unit/test_task_remote_mgr.py b/tests/unit/test_task_remote_mgr.py
index dab97b2d7ce..c41e415eba3 100644
--- a/tests/unit/test_task_remote_mgr.py
+++ b/tests/unit/test_task_remote_mgr.py
@@ -43,9 +43,9 @@ def test__remote_init_items(comms_meth: CommsMeth, expected: bool):
Should only includes files under .service/
"""
- reg = 'barclay'
- mock_mgr = Mock(workflow=reg)
- srv_dir = get_workflow_srv_dir(reg)
+ id_ = 'barclay'
+ mock_mgr = Mock(workflow=id_)
+ srv_dir = get_workflow_srv_dir(id_)
items = TaskRemoteMgr._remote_init_items(mock_mgr, comms_meth)
if expected:
assert items
diff --git a/tests/unit/test_taskdef.py b/tests/unit/test_taskdef.py
index b58bab8529f..550e01f5f15 100644
--- a/tests/unit/test_taskdef.py
+++ b/tests/unit/test_taskdef.py
@@ -24,9 +24,9 @@
def test_generate_graph_parents_1(tmp_flow_config):
"""Test that parents are only generated from valid recurrences."""
- reg = 'pan-galactic'
+ id_ = 'pan-galactic'
flow_file = tmp_flow_config(
- reg,
+ id_,
f"""
[scheduler]
UTC mode = True
@@ -41,7 +41,7 @@ def test_generate_graph_parents_1(tmp_flow_config):
[[every_cycle, run_once_at_midnight]]
"""
)
- cfg = WorkflowConfig(workflow=reg, fpath=flow_file, options=None)
+ cfg = WorkflowConfig(workflow=id_, fpath=flow_file, options=None)
# Each instance of every_cycle should have a parent only at T00.
for point in [
@@ -65,9 +65,9 @@ def test_generate_graph_parents_1(tmp_flow_config):
def test_generate_graph_parents_2(tmp_flow_config):
"""Test inferred parents are valid w.r.t to their own recurrences."""
- reg = 'gargle-blaster'
+ id_ = 'gargle-blaster'
flow_file = tmp_flow_config(
- reg,
+ id_,
f"""
[scheduling]
cycling mode = integer
@@ -77,7 +77,7 @@ def test_generate_graph_parents_2(tmp_flow_config):
[[foo]]
"""
)
- cfg = WorkflowConfig(workflow=reg, fpath=flow_file, options=None)
+ cfg = WorkflowConfig(workflow=id_, fpath=flow_file, options=None)
# Each instance of every_cycle should have a parent only at T00.
parents = generate_graph_parents(
diff --git a/tests/unit/test_workflow_db_mgr.py b/tests/unit/test_workflow_db_mgr.py
index f47cc150da6..48de335ac4c 100644
--- a/tests/unit/test_workflow_db_mgr.py
+++ b/tests/unit/test_workflow_db_mgr.py
@@ -26,6 +26,7 @@
CylcWorkflowDAO,
WorkflowDatabaseManager,
)
+from cylc.flow.dbstatecheck import CylcWorkflowDBChecker
@pytest.fixture
@@ -116,3 +117,22 @@ def test_check_workflow_db_compat(_setup_db, capsys):
with pytest.raises(ServiceFileError, match='99.99'):
WorkflowDatabaseManager.check_db_compatibility(pri_path)
+
+
+def test_cylc_7_db_wflow_params_table(_setup_db):
+ """Test back-compat needed by workflow state xtrigger for Cylc 7 DBs."""
+ ptformat = "CCYY"
+ create = r'CREATE TABLE suite_params(key TEXT, value TEXT)'
+ insert = (
+ r'INSERT INTO suite_params VALUES'
+ rf'("cycle_point_format", "{ptformat}")'
+ )
+ db_file_name = _setup_db([create, insert])
+ checker = CylcWorkflowDBChecker('foo', 'bar', db_path=db_file_name)
+
+ with pytest.raises(
+ sqlite3.OperationalError, match="no such table: workflow_params"
+ ):
+ checker.get_remote_point_format()
+
+ assert checker.get_remote_point_format_compat() == ptformat
diff --git a/tests/unit/test_workflow_files.py b/tests/unit/test_workflow_files.py
index 90fe3327845..b2b33e495aa 100644
--- a/tests/unit/test_workflow_files.py
+++ b/tests/unit/test_workflow_files.py
@@ -93,7 +93,7 @@ def test_is_valid_run_dir(is_abs_path: bool, tmp_run_dir: Callable):
@pytest.mark.parametrize(
- 'reg, expected_err, expected_msg',
+ 'id_, expected_err, expected_msg',
[('foo/bar/', None, None),
('/foo/bar', WorkflowFilesError, "cannot be an absolute path"),
('$HOME/alone', WorkflowFilesError, "invalid workflow name"),
@@ -101,14 +101,14 @@ def test_is_valid_run_dir(is_abs_path: bool, tmp_run_dir: Callable):
('meow/..', WorkflowFilesError,
"cannot be a path that points to the cylc-run directory or above")]
)
-def test_validate_workflow_name(reg, expected_err, expected_msg):
+def test_validate_workflow_name(id_, expected_err, expected_msg):
if expected_err:
with pytest.raises(expected_err) as exc:
- validate_workflow_name(reg)
+ validate_workflow_name(id_)
if expected_msg:
assert expected_msg in str(exc.value)
else:
- validate_workflow_name(reg)
+ validate_workflow_name(id_)
@pytest.mark.parametrize(
@@ -169,7 +169,7 @@ def test_infer_latest_run(
Params:
path: Input arg.
implicit_runN: Input arg.
- expected_reg: The reg part of the expected returned tuple.
+ expected_reg: The id_ part of the expected returned tuple.
"""
# Setup
cylc_run_dir: Path = tmp_run_dir()
@@ -307,11 +307,11 @@ def test_get_symlink_dirs(
# Setup
cylc_run_dir = tmp_run_dir()
create_filetree(filetree, tmp_path, tmp_path)
- reg = 'foo/bar'
+ id_ = 'foo/bar'
for k, v in expected.items():
expected[k] = Path(tmp_path / v)
# Test
- assert get_symlink_dirs(reg, cylc_run_dir / reg) == expected
+ assert get_symlink_dirs(id_, cylc_run_dir / id_) == expected
@@ -484,14 +484,14 @@ def test_check_flow_file_symlink(
@pytest.mark.parametrize(
- 'reg, installed, named, expected',
+ 'id_, installed, named, expected',
[('reg1/run1', True, True, True),
('reg2', True, False, True),
('reg3', False, False, False)]
)
-def test_is_installed(tmp_run_dir: Callable, reg, installed, named, expected):
+def test_is_installed(tmp_run_dir: Callable, id_, installed, named, expected):
"""Test is_installed correctly identifies presence of _cylc-install dir"""
- cylc_run_dir: Path = tmp_run_dir(reg, installed=installed, named=named)
+ cylc_run_dir: Path = tmp_run_dir(id_, installed=installed, named=named)
actual = is_installed(cylc_run_dir)
assert actual == expected
diff --git a/tests/unit/test_workflow_status.py b/tests/unit/test_workflow_status.py
index 046783d4ff1..af88de3daab 100644
--- a/tests/unit/test_workflow_status.py
+++ b/tests/unit/test_workflow_status.py
@@ -36,12 +36,14 @@ def schd(
stop_mode=None,
stop_point=None,
stop_task_id=None,
+ reload_pending=False,
):
return SimpleNamespace(
is_paused=is_paused,
is_stalled=is_stalled,
stop_clock_time=stop_clock_time,
stop_mode=stop_mode,
+ reload_pending=reload_pending,
pool=SimpleNamespace(
hold_point=hold_point,
stop_point=stop_point,
@@ -58,7 +60,13 @@ def schd(
(
{'is_paused': True},
WorkflowStatus.PAUSED,
- 'paused'),
+ 'paused'
+ ),
+ (
+ {'reload_pending': 'message'},
+ WorkflowStatus.PAUSED,
+ 'reloading: message'
+ ),
(
{'stop_mode': StopMode.AUTO},
WorkflowStatus.STOPPING,
diff --git a/tests/unit/tui/test_data.py b/tests/unit/tui/test_data.py
index a2d17bf2e76..85805a5d1ea 100644
--- a/tests/unit/tui/test_data.py
+++ b/tests/unit/tui/test_data.py
@@ -28,7 +28,7 @@ def test_generate_mutation(monkeypatch):
monkeypatch.setattr(cylc.flow.tui.data, 'ARGUMENT_TYPES', arg_types)
assert generate_mutation(
'my_mutation',
- ['foo', 'bar']
+ {'foo': 'foo', 'bar': 'bar', 'user': 'user'}
) == '''
mutation($foo: String!, $bar: [Int]) {
my_mutation (foos: $foo, bars: $bar) {
diff --git a/tests/unit/tui/test_overlay.py b/tests/unit/tui/test_overlay.py
index 42334aac009..013e8480c21 100644
--- a/tests/unit/tui/test_overlay.py
+++ b/tests/unit/tui/test_overlay.py
@@ -21,7 +21,9 @@
import pytest
import urwid
+from cylc.flow.tui.app import BINDINGS
import cylc.flow.tui.overlay
+from cylc.flow.workflow_status import WorkflowStatus
@pytest.fixture
@@ -39,6 +41,7 @@ def overlay_functions():
getattr(cylc.flow.tui.overlay, obj.name)
for obj in tree.body
if isinstance(obj, ast.FunctionDef)
+ and not obj.name.startswith('_')
]
@@ -47,14 +50,21 @@ def test_interface(overlay_functions):
for function in overlay_functions:
# mock up an app object to keep things working
app = Mock(
- filter_states={},
+ filters={'tasks': {}, 'workflows': {'id': '.*'}},
+ bindings=BINDINGS,
tree_walker=Mock(
get_focus=Mock(
return_value=[
Mock(
get_node=Mock(
return_value=Mock(
- get_value=lambda: {'id_': 'a'}
+ get_value=lambda: {
+ 'id_': '~u/a',
+ 'type_': 'workflow',
+ 'data': {
+ 'status': WorkflowStatus.RUNNING,
+ },
+ }
)
)
)
diff --git a/tests/unit/tui/test_util.py b/tests/unit/tui/test_util.py
index 00ac9fa95be..2b3231e0f7e 100644
--- a/tests/unit/tui/test_util.py
+++ b/tests/unit/tui/test_util.py
@@ -189,77 +189,87 @@ def test_compute_tree():
"""
tree = compute_tree({
- 'id': 'workflow id',
- 'cyclePoints': [
- {
- 'id': '1/family-suffix',
- 'cyclePoint': '1'
- }
- ],
- 'familyProxies': [
- { # top level family
- 'name': 'FOO',
- 'id': '1/FOO',
- 'cyclePoint': '1',
- 'firstParent': {'name': 'root', 'id': '1/root'}
- },
- { # nested family
- 'name': 'FOOT',
- 'id': '1/FOOT',
- 'cyclePoint': '1',
- 'firstParent': {'name': 'FOO', 'id': '1/FOO'}
- },
- ],
- 'taskProxies': [
- { # top level task
- 'name': 'pub',
- 'id': '1/pub',
- 'firstParent': {'name': 'root', 'id': '1/root'},
- 'cyclePoint': '1',
- 'jobs': []
- },
- { # child task (belongs to family)
- 'name': 'fan',
- 'id': '1/fan',
- 'firstParent': {'name': 'fan', 'id': '1/fan'},
- 'cyclePoint': '1',
- 'jobs': []
- },
- { # nested child task (belongs to incestuous family)
- 'name': 'fool',
- 'id': '1/fool',
- 'firstParent': {'name': 'FOOT', 'id': '1/FOOT'},
- 'cyclePoint': '1',
- 'jobs': []
- },
- { # a task which has jobs
- 'name': 'worker',
- 'id': '1/worker',
- 'firstParent': {'name': 'root', 'id': '1/root'},
- 'cyclePoint': '1',
- 'jobs': [
- {'id': '1/worker/03', 'submitNum': '3'},
- {'id': '1/worker/02', 'submitNum': '2'},
- {'id': '1/worker/01', 'submitNum': '1'}
- ]
- }
- ]
+ 'workflows': [{
+ 'id': 'workflow id',
+ 'port': 1234,
+ 'cyclePoints': [
+ {
+ 'id': '1/family-suffix',
+ 'cyclePoint': '1'
+ }
+ ],
+ 'familyProxies': [
+ { # top level family
+ 'name': 'FOO',
+ 'id': '1/FOO',
+ 'cyclePoint': '1',
+ 'firstParent': {'name': 'root', 'id': '1/root'}
+ },
+ { # nested family
+ 'name': 'FOOT',
+ 'id': '1/FOOT',
+ 'cyclePoint': '1',
+ 'firstParent': {'name': 'FOO', 'id': '1/FOO'}
+ },
+ ],
+ 'taskProxies': [
+ { # top level task
+ 'name': 'pub',
+ 'id': '1/pub',
+ 'firstParent': {'name': 'root', 'id': '1/root'},
+ 'cyclePoint': '1',
+ 'jobs': []
+ },
+ { # child task (belongs to family)
+ 'name': 'fan',
+ 'id': '1/fan',
+ 'firstParent': {'name': 'fan', 'id': '1/fan'},
+ 'cyclePoint': '1',
+ 'jobs': []
+ },
+ { # nested child task (belongs to incestuous family)
+ 'name': 'fool',
+ 'id': '1/fool',
+ 'firstParent': {'name': 'FOOT', 'id': '1/FOOT'},
+ 'cyclePoint': '1',
+ 'jobs': []
+ },
+ { # a task which has jobs
+ 'name': 'worker',
+ 'id': '1/worker',
+ 'firstParent': {'name': 'root', 'id': '1/root'},
+ 'cyclePoint': '1',
+ 'jobs': [
+ {'id': '1/worker/03', 'submitNum': '3'},
+ {'id': '1/worker/02', 'submitNum': '2'},
+ {'id': '1/worker/01', 'submitNum': '1'}
+ ]
+ }
+ ]
+ }]
})
+ # the root node
+ assert tree['type_'] == 'root'
+ assert tree['id_'] == 'root'
+ assert len(tree['children']) == 1
+
# the workflow node
- assert tree['type_'] == 'workflow'
- assert tree['id_'] == 'workflow id'
- assert list(tree['data']) == [
+ workflow = tree['children'][0]
+ assert workflow['type_'] == 'workflow'
+ assert workflow['id_'] == 'workflow id'
+ assert set(workflow['data']) == {
# whatever if present on the node should end up in data
- 'id',
'cyclePoints',
'familyProxies',
+ 'id',
+ 'port',
'taskProxies'
- ]
- assert len(tree['children']) == 1
+ }
+ assert len(workflow['children']) == 1
# the cycle point node
- cycle = tree['children'][0]
+ cycle = workflow['children'][0]
assert cycle['type_'] == 'cycle'
assert cycle['id_'] == '//1'
assert list(cycle['data']) == [
diff --git a/tests/unit/xtriggers/test_workflow_state.py b/tests/unit/xtriggers/test_workflow_state.py
index 99a8c6d5fe0..b3d25737cc2 100644
--- a/tests/unit/xtriggers/test_workflow_state.py
+++ b/tests/unit/xtriggers/test_workflow_state.py
@@ -14,18 +14,20 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
+from pathlib import Path
+import sqlite3
from typing import Callable
from unittest.mock import Mock
-
+from cylc.flow.workflow_files import WorkflowFiles
from cylc.flow.xtriggers.workflow_state import workflow_state
from ..conftest import MonkeyMock
def test_inferred_run(tmp_run_dir: Callable, monkeymock: MonkeyMock):
"""Test that the workflow_state xtrigger infers the run number"""
- reg = 'isildur'
- expected_workflow_id = f'{reg}/run1'
+ id_ = 'isildur'
+ expected_workflow_id = f'{id_}/run1'
cylc_run_dir = str(tmp_run_dir())
tmp_run_dir(expected_workflow_id, installed=True, named=True)
mock_db_checker = monkeymock(
@@ -35,6 +37,50 @@ def test_inferred_run(tmp_run_dir: Callable, monkeymock: MonkeyMock):
)
)
- _, results = workflow_state(reg, task='precious', point='3000')
+ _, results = workflow_state(id_, task='precious', point='3000')
mock_db_checker.assert_called_once_with(cylc_run_dir, expected_workflow_id)
assert results['workflow'] == expected_workflow_id
+
+
+def test_back_compat(tmp_run_dir):
+ """Test workflow_state xtrigger backwards compatibility with Cylc 7
+ database."""
+ id_ = 'celebrimbor'
+ c7_run_dir: Path = tmp_run_dir(id_)
+ (c7_run_dir / WorkflowFiles.FLOW_FILE).rename(
+ c7_run_dir / WorkflowFiles.SUITE_RC
+ )
+ db_file = c7_run_dir / 'log' / 'db'
+ db_file.parent.mkdir(exist_ok=True)
+ # Note: cannot use CylcWorkflowDAO here as creating outdated DB
+ conn = sqlite3.connect(str(db_file))
+ try:
+ conn.execute(r"""
+ CREATE TABLE suite_params(key TEXT, value TEXT, PRIMARY KEY(key));
+ """)
+ conn.execute(r"""
+ CREATE TABLE task_states(
+ name TEXT, cycle TEXT, time_created TEXT, time_updated TEXT,
+ submit_num INTEGER, status TEXT, PRIMARY KEY(name, cycle)
+ );
+ """)
+ conn.executemany(
+ r'INSERT INTO "suite_params" VALUES(?,?);',
+ [('cylc_version', '7.8.12'),
+ ('cycle_point_format', '%Y'),
+ ('cycle_point_tz', 'Z')]
+ )
+ conn.execute(r"""
+ INSERT INTO "task_states" VALUES(
+ 'mithril','2012','2023-01-30T18:19:15Z','2023-01-30T18:19:15Z',
+ 0,'succeeded'
+ );
+ """)
+ conn.commit()
+ finally:
+ conn.close()
+
+ satisfied, _ = workflow_state(id_, task='mithril', point='2012')
+ assert satisfied
+ satisfied, _ = workflow_state(id_, task='arkenstone', point='2012')
+ assert not satisfied