From 202a3ece9705289a1f12c85e64cf90307ca85c39 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Fri, 7 Jun 2024 14:49:18 +0200 Subject: [PATCH 01/42] CLI: Add the `verdi profile configure-rabbitmq` command (#6454) Now that profiles can be created without defining a broker, a command is needed that can add a RabbitMQ connection configuration. The new command `verdi profile configure-rabbitmq` enables a broker for a profile if it wasn't already, and allows configuring the connection parameters. --- docs/source/reference/command_line.rst | 14 +++++------ src/aiida/cmdline/commands/cmd_profile.py | 22 +++++++++++++++++- .../cmdline/params/options/commands/setup.py | 2 ++ tests/cmdline/commands/test_profile.py | 23 +++++++++++++++++++ 4 files changed, 53 insertions(+), 8 deletions(-) diff --git a/docs/source/reference/command_line.rst b/docs/source/reference/command_line.rst index aa00b218e4..0953d027f7 100644 --- a/docs/source/reference/command_line.rst +++ b/docs/source/reference/command_line.rst @@ -391,13 +391,13 @@ Below is a list with all available subcommands. --help Show this message and exit. Commands: - delete Delete one or more profiles. - list Display a list of all available profiles. - set-default Set a profile as the default profile. - setdefault (Deprecated) Set a profile as the default profile (use `verdi profile set- - default`). - setup Set up a new profile. - show Show details for a profile. + configure-rabbitmq Configure RabbitMQ for a profile. + delete Delete one or more profiles. + list Display a list of all available profiles. + set-default Set a profile as the default profile. + setdefault (Deprecated) Set a profile as the default profile. + setup Set up a new profile. + show Show details for a profile. .. _reference:command-line:verdi-quicksetup: diff --git a/src/aiida/cmdline/commands/cmd_profile.py b/src/aiida/cmdline/commands/cmd_profile.py index 2de2ce173d..0b8065a9a8 100644 --- a/src/aiida/cmdline/commands/cmd_profile.py +++ b/src/aiida/cmdline/commands/cmd_profile.py @@ -128,6 +128,26 @@ def profile_setup(): """Set up a new profile.""" +@verdi_profile.command('configure-rabbitmq') # type: ignore[arg-type] +@arguments.PROFILE(default=defaults.get_default_profile) +@setup.SETUP_BROKER_PROTOCOL() +@setup.SETUP_BROKER_USERNAME() +@setup.SETUP_BROKER_PASSWORD() +@setup.SETUP_BROKER_HOST() +@setup.SETUP_BROKER_PORT() +@setup.SETUP_BROKER_VIRTUAL_HOST() +@options.NON_INTERACTIVE() +@click.pass_context +def profile_configure_rabbitmq(ctx, profile, **kwargs): + """Configure RabbitMQ for a profile. + + Enable RabbitMQ for a profile that was created without a broker, or reconfigure existing connection details. 
+ """ + profile.set_process_controller(name='core.rabbitmq', config=kwargs) + ctx.obj.config.update_profile(profile) + ctx.obj.config.store() + + @verdi_profile.command('list') def profile_list(): """Display a list of all available profiles.""" @@ -179,7 +199,7 @@ def profile_show(profile): @verdi_profile.command('setdefault', deprecated='Please use `verdi profile set-default` instead.') @arguments.PROFILE(required=True, default=None) def profile_setdefault(profile): - """Set a profile as the default profile (use `verdi profile set-default`).""" + """Set a profile as the default profile.""" _profile_set_default(profile) diff --git a/src/aiida/cmdline/params/options/commands/setup.py b/src/aiida/cmdline/params/options/commands/setup.py index bbd980c976..008f51b3a0 100644 --- a/src/aiida/cmdline/params/options/commands/setup.py +++ b/src/aiida/cmdline/params/options/commands/setup.py @@ -50,6 +50,8 @@ def get_profile_attribute_default(attribute_tuple, ctx): try: data = ctx.params['profile'].dictionary for part in parts: + if data is None: + return default data = data[part] return data except KeyError: diff --git a/tests/cmdline/commands/test_profile.py b/tests/cmdline/commands/test_profile.py index 909562245a..51594b6ca7 100644 --- a/tests/cmdline/commands/test_profile.py +++ b/tests/cmdline/commands/test_profile.py @@ -269,3 +269,26 @@ def test_setup_no_use_rabbitmq(run_cli_command, isolated_config): profile = isolated_config.get_profile(profile_name) assert profile.process_control_backend is None assert profile.process_control_config == {} + + +def test_configure_rabbitmq(run_cli_command, isolated_config): + """Test the ``verdi profile configure-rabbitmq`` command.""" + profile_name = 'profile' + + # First setup a profile without a broker configured + options = ['core.sqlite_dos', '-n', '--email', 'a@a', '--profile', profile_name, '--no-use-rabbitmq'] + run_cli_command(cmd_profile.profile_setup, options, use_subprocess=False) + profile = isolated_config.get_profile(profile_name) + assert profile.process_control_backend is None + assert profile.process_control_config == {} + + # Now run the command to configure the broker + options = [profile_name, '-n'] + run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=False) + assert profile.process_control_backend == 'core.rabbitmq' + + # Call it again to check it works to reconfigure existing broker connection parameters + options = [profile_name, '-n', '--broker-host', 'rabbitmq.broker.com'] + run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=False) + assert profile.process_control_backend == 'core.rabbitmq' + assert profile.process_control_config['broker_host'] == 'rabbitmq.broker.com' From cd0f9acb4b932557a91387b7d804cb94ac4dcbb3 Mon Sep 17 00:00:00 2001 From: Marnik Bercx Date: Tue, 18 Jun 2024 13:27:07 +0200 Subject: [PATCH 02/42] =?UTF-8?q?=F0=9F=90=9B=20`RabbitmqBroker`:=20catch?= =?UTF-8?q?=20`ConnectionError`=20for=20`=5F=5Fstr=5F=5F`=20(#6473)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The current implementation of the `RabbitmqBroker.__str__()` method always prints both the version and the URL of the RabbitMQ server. However, the `get_rabbitmq_version()` method fails with a `ConnectionError` in case the RabbitMQ broker is not able to connect to the server. This issue would bubble up into the `verdi status` command, since this prints the string representation of the `RabbitmqBroker` in the message that reports the connection failure. 
At this point the `ConnectionError` is no longer caught, and hence the user is exposed to the full traceback. Here we adapt the `RabbitmqBroker.__str__()` method to catch the `ConnectionError` and return the URL with the message that the connection failed. --- src/aiida/brokers/rabbitmq/broker.py | 5 ++++- tests/brokers/test_rabbitmq.py | 13 +++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/src/aiida/brokers/rabbitmq/broker.py b/src/aiida/brokers/rabbitmq/broker.py index dab19e28b7..5321f6d400 100644 --- a/src/aiida/brokers/rabbitmq/broker.py +++ b/src/aiida/brokers/rabbitmq/broker.py @@ -34,7 +34,10 @@ def __init__(self, profile: Profile) -> None: self._prefix = f'aiida-{self._profile.uuid}' def __str__(self): - return f'RabbitMQ v{self.get_rabbitmq_version()} @ {self.get_url()}' + try: + return f'RabbitMQ v{self.get_rabbitmq_version()} @ {self.get_url()}' + except ConnectionError: + return f'RabbitMQ @ {self.get_url()} ' def close(self): """Close the broker.""" diff --git a/tests/brokers/test_rabbitmq.py b/tests/brokers/test_rabbitmq.py index fc27a3eaf6..00ee662338 100644 --- a/tests/brokers/test_rabbitmq.py +++ b/tests/brokers/test_rabbitmq.py @@ -22,6 +22,19 @@ pytestmark = pytest.mark.requires_rmq +def test_str_method(monkeypatch, manager): + """Test the `__str__` method of the `RabbitmqBroker`.""" + + def raise_connection_error(): + raise ConnectionError + + broker = manager.get_broker() + assert 'RabbitMQ v' in str(broker) + + monkeypatch.setattr(broker, 'get_communicator', raise_connection_error) + assert 'RabbitMQ @' in str(broker) + + @pytest.mark.parametrize( ('version', 'supported'), ( From 022f049bfcf86609daf0c2d9ddc0b1c108b9ea7c Mon Sep 17 00:00:00 2001 From: Marnik Bercx Date: Wed, 19 Jun 2024 09:39:22 +0200 Subject: [PATCH 03/42] CLI: Fix bug with profile name determination in `verdi presto` (#6477) When the user is using `verdi presto` to create more than 11 profiles, the command will fail because `presto-10` already exists. This is due to the fact that the `get_default_presto_profile_name()` function sorts the existing indices as strings, which means `10` will precede `9` and hence the "last index" would be `9`, making the new index `10`, which already exists. Here we fix this issue by casting the extracted existing indices as integers, so the sorting works as intended. 
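A minimal sketch of the failure mode (illustrative profile names, but the same regex and index logic as the command):

```python
import re

profile_names = ['presto', 'presto-1', 'presto-9', 'presto-10']

as_strings = sorted(m.group(1) or '0' for name in profile_names if (m := re.search(r'presto[-]?(\d+)?', name)))
as_integers = sorted(int(m.group(1) or '0') for name in profile_names if (m := re.search(r'presto[-]?(\d+)?', name)))

print(as_strings)   # ['0', '1', '10', '9'] -> the "last" index is 9, so `presto-10` is suggested again
print(as_integers)  # [0, 1, 9, 10]         -> the next index is 11, as intended
```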
---
 src/aiida/cmdline/commands/cmd_presto.py | 2 +-
 tests/cmdline/commands/test_presto.py | 8 ++++++++
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/src/aiida/cmdline/commands/cmd_presto.py b/src/aiida/cmdline/commands/cmd_presto.py
index b61a8b6cfd..1893a6d461 100644
--- a/src/aiida/cmdline/commands/cmd_presto.py
+++ b/src/aiida/cmdline/commands/cmd_presto.py
@@ -32,7 +32,7 @@ def get_default_presto_profile_name():
     for profile_name in profile_names:
         if match := re.search(r'presto[-]?(\d+)?', profile_name):
-            indices.append(match.group(1) or '0')
+            indices.append(int(match.group(1) or '0'))
     if not indices:
         return DEFAULT_PROFILE_NAME_PREFIX
diff --git a/tests/cmdline/commands/test_presto.py b/tests/cmdline/commands/test_presto.py
index 13760a53b7..8651664f61 100644
--- a/tests/cmdline/commands/test_presto.py
+++ b/tests/cmdline/commands/test_presto.py
@@ -80,3 +80,11 @@ def test_presto_use_postgres_fail(run_cli_command):
     options = ['--non-interactive', '--use-postgres', '--postgres-port', str(5000)]
     result = run_cli_command(verdi_presto, options, raises=True)
     assert 'Failed to connect to the PostgreSQL server' in result.output
+
+
+@pytest.mark.usefixtures('empty_config')
+def test_presto_overdose(run_cli_command, config_with_profile_factory):
+    """Test that ``verdi presto`` still works for users that have over 10 presto profiles."""
+    config_with_profile_factory(name='presto-10')
+    result = run_cli_command(verdi_presto)
+    assert 'Created new profile `presto-11`.' in result.output

From 57598b16468030bc124846d2d995a3134659375d Mon Sep 17 00:00:00 2001
From: Marnik Bercx
Date: Wed, 19 Jun 2024 13:07:30 +0200
Subject: [PATCH 04/42] =?UTF-8?q?=E2=9C=A8=20CLI:=20Make=20`NON=5FINTERACT?=
 =?UTF-8?q?IVE`=20option=20a=20switch=20instead=20of=20flag?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

For many setup/configuration CLI commands, the `NON_INTERACTIVE` option is added to allow the user to run the command without being prompted for input and use the defaults instead. However, new users are often not aware of this option, and may not understand some of the options they are prompted for. Even when a sensible default is offered by the prompt, users will still want to understand the option and may be unsure whether the default works for them. Hence, it might be preferable to run it non-interactively by default for some commands.

Here we adapt the `NON_INTERACTIVE` option into a switch (`-n/-I` or `--non-interactive`/`--interactive`) that is `--interactive` by default.
---
 docs/source/reference/command_line.rst | 51 +++++++++++--------
 .../cmdline/params/options/interactive.py | 4 +-
 src/aiida/cmdline/params/options/main.py | 12 +++-- 3 files changed, 40 insertions(+), 27 deletions(-)

diff --git a/docs/source/reference/command_line.rst b/docs/source/reference/command_line.rst
index 0953d027f7..b3edb33a0e 100644
--- a/docs/source/reference/command_line.rst
+++ b/docs/source/reference/command_line.rst
@@ -329,23 +329,26 @@ Below is a list with all available subcommands.
       the newly created profile uses the new PostgreSQL database instead of SQLite.

     Options:
-      --profile-name TEXT       Name of the profile. By default, a unique name starting with
-                                `presto` is automatically generated.  [default: (dynamic)]
-      --email TEXT              Email of the default user.  [default: (dynamic)]
-      --use-postgres            When toggled on, the profile uses a PostgreSQL database
-                                instead of an SQLite one. The connection details to the
-                                PostgreSQL server can be configured with the relevant options.
- The command attempts to automatically create a user and - database to use for the profile, but this can fail depending - on the configuration of the server. - --postgres-hostname TEXT The hostname of the PostgreSQL server. - --postgres-port INTEGER The port of the PostgreSQL server. - --postgres-username TEXT The username of the PostgreSQL user that is authorized to - create new databases. - --postgres-password TEXT The password of the PostgreSQL user that is authorized to - create new databases. - -n, --non-interactive Never prompt, such as for sudo password. - --help Show this message and exit. + --profile-name TEXT Name of the profile. By default, a unique name starting + with `presto` is automatically generated. [default: + (dynamic)] + --email TEXT Email of the default user. [default: (dynamic)] + --use-postgres When toggled on, the profile uses a PostgreSQL database + instead of an SQLite one. The connection details to the + PostgreSQL server can be configured with the relevant + options. The command attempts to automatically create a + user and database to use for the profile, but this can + fail depending on the configuration of the server. + --postgres-hostname TEXT The hostname of the PostgreSQL server. + --postgres-port INTEGER The port of the PostgreSQL server. + --postgres-username TEXT The username of the PostgreSQL user that is authorized + to create new databases. + --postgres-password TEXT The password of the PostgreSQL user that is authorized + to create new databases. + -n, --non-interactive / -I, --interactive + Never prompt, such as for sudo password. [default: + (--interactive)] + --help Show this message and exit. .. _reference:command-line:verdi-process: @@ -412,8 +415,11 @@ Below is a list with all available subcommands. (Deprecated) Setup a new profile in a fully automated fashion. Options: - -n, --non-interactive In non-interactive mode, the CLI never prompts but - simply uses default values for options that define one. + -n, --non-interactive / -I, --interactive + In non-interactive mode, the CLI never prompts for + options but simply uses default values for options that + define one. In interactive mode, the CLI will prompt for + each interactive option. [default: (--interactive)] --profile PROFILE The name of the new profile. [required] --email EMAIL Email address associated with the data you generate. The email address is exported along with the data, when @@ -516,8 +522,11 @@ Below is a list with all available subcommands. user has been created. Options: - -n, --non-interactive In non-interactive mode, the CLI never prompts but - simply uses default values for options that define one. + -n, --non-interactive / -I, --interactive + In non-interactive mode, the CLI never prompts for + options but simply uses default values for options that + define one. In interactive mode, the CLI will prompt for + each interactive option. [default: (--interactive)] --profile PROFILE The name of the new profile. [required] --email EMAIL Email address associated with the data you generate. 
The email address is exported along with the data, when
diff --git a/src/aiida/cmdline/params/options/interactive.py b/src/aiida/cmdline/params/options/interactive.py
index c044d04907..d6c216eca9 100644
--- a/src/aiida/cmdline/params/options/interactive.py
+++ b/src/aiida/cmdline/params/options/interactive.py
@@ -167,9 +167,9 @@ def get_default(self, ctx: click.Context, call: bool = True) -> t.Optional[t.Uni
 def is_interactive(ctx: click.Context) -> bool:
     """Return whether the command is being run non-interactively.

-    This is the case if the ``non_interactive`` parameter in the context is set to ``True``.
+    This is the case if the ``non_interactive`` parameter in the context is set to ``False``.

-    :return: ``True`` if being run non-interactively, ``False`` otherwise.
+    :return: ``True`` if being run interactively, ``False`` otherwise.
     """
     return not ctx.params.get('non_interactive', False)
diff --git a/src/aiida/cmdline/params/options/main.py b/src/aiida/cmdline/params/options/main.py
index 85b3090ad5..aa86a1f0dd 100644
--- a/src/aiida/cmdline/params/options/main.py
+++ b/src/aiida/cmdline/params/options/main.py
@@ -344,11 +344,15 @@ def set_log_level(_ctx, _param, value):
 )

 NON_INTERACTIVE = OverridableOption(
-    '-n',
-    '--non-interactive',
-    is_flag=True,
+    '-n/-I',
+    '--non-interactive/--interactive',
     is_eager=True,
-    help='In non-interactive mode, the CLI never prompts but simply uses default values for options that define one.',
+    help=(
+        'In non-interactive mode, the CLI never prompts for options but simply uses default values for options that '
+        'define one. In interactive mode, the CLI will prompt for each interactive option. '
+    ),
+    default=False,
+    show_default='--interactive',
 )

 DRY_RUN = OverridableOption('-n', '--dry-run', is_flag=True, help='Perform a dry run.')

From aaada5454ead927a3cd2c3d48788e5730ea53aaf Mon Sep 17 00:00:00 2001
From: Marnik Bercx
Date: Wed, 19 Jun 2024 13:09:12 +0200
Subject: [PATCH 05/42] =?UTF-8?q?=F0=9F=91=8C=20CLI:=20Make=20`configure-r?=
 =?UTF-8?q?abbitmq`=20non-interactive=20by=20default?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The `configure-rabbitmq` command was introduced mainly to allow new users that set up their profile with `verdi presto` before they set up RabbitMQ to upgrade their profile to use the message broker. However, since the command is interactive, they would be prompted for each of the options when running the command without `-n`/`--non-interactive`.

Users that don't understand these inputs will typically want to accept the defaults, so switching the modus operandi of the command to be non-interactive will make life easier for these users. Users that _do_ want to set different values than the defaults will understand the options of the command and should be able to provide them directly or via the interactive mode.
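As a rough illustration of the mechanism, not the actual AiiDA option factory, a reusable click switch can expose its default for per-command override (the names here are hypothetical):

```python
import click


def non_interactive_option(**overrides):
    """Build a reusable ``-n/-I`` switch; keyword overrides adjust e.g. the default."""
    kwargs = {'default': False, 'show_default': '--interactive', 'help': 'Never prompt; use default values.'}
    kwargs.update(overrides)
    return click.option('-n/-I', '--non-interactive/--interactive', 'non_interactive', **kwargs)


@click.command()
@non_interactive_option(default=True, show_default='--non-interactive')  # non-interactive unless -I is given
def configure(non_interactive):
    click.echo(f'non_interactive={non_interactive}')
```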
--- src/aiida/cmdline/commands/cmd_profile.py | 2 +- tests/cmdline/commands/test_profile.py | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/aiida/cmdline/commands/cmd_profile.py b/src/aiida/cmdline/commands/cmd_profile.py index 0b8065a9a8..55d59f706e 100644 --- a/src/aiida/cmdline/commands/cmd_profile.py +++ b/src/aiida/cmdline/commands/cmd_profile.py @@ -136,7 +136,7 @@ def profile_setup(): @setup.SETUP_BROKER_HOST() @setup.SETUP_BROKER_PORT() @setup.SETUP_BROKER_VIRTUAL_HOST() -@options.NON_INTERACTIVE() +@options.NON_INTERACTIVE(default=True, show_default='--non-interactive') @click.pass_context def profile_configure_rabbitmq(ctx, profile, **kwargs): """Configure RabbitMQ for a profile. diff --git a/tests/cmdline/commands/test_profile.py b/tests/cmdline/commands/test_profile.py index 51594b6ca7..9718bd06f5 100644 --- a/tests/cmdline/commands/test_profile.py +++ b/tests/cmdline/commands/test_profile.py @@ -287,6 +287,13 @@ def test_configure_rabbitmq(run_cli_command, isolated_config): run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=False) assert profile.process_control_backend == 'core.rabbitmq' + # Verify that running in non-interactive mode is the default + options = [ + profile_name, + ] + run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=True) + assert profile.process_control_backend == 'core.rabbitmq' + # Call it again to check it works to reconfigure existing broker connection parameters options = [profile_name, '-n', '--broker-host', 'rabbitmq.broker.com'] run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=False) From c2ca6423c1803f75ef198f896cb8ba85339be3ff Mon Sep 17 00:00:00 2001 From: Marnik Bercx Date: Wed, 19 Jun 2024 13:26:55 +0200 Subject: [PATCH 06/42] =?UTF-8?q?=F0=9F=91=8C=20CLI:=20Give=20feedback=20f?= =?UTF-8?q?or=20`configure-rabbitmq`?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently the `verdi profile configure-rabbitmq` command doesn't give any feedback to the user whether the provided options can successfully connect to the RabbitMQ server. Here we adapt the `detect_rabbitmq_config` function to accept the broker configuration as `**kwargs`, and use it to check if the provided options in the `configure-rabbitmq` can successfully connect to the RabbitMQ server. A "success" message is printed if we can connect to the server, else a warning is printed and the user is asked for confirmation before proceeding to configure the broker. A `--force` flag is also added to avoid asking for confirmation in case the command is unable to connect to the broker. 
--- src/aiida/brokers/rabbitmq/defaults.py | 24 +++++++++++++++-------- src/aiida/cmdline/commands/cmd_profile.py | 18 ++++++++++++++++- tests/cmdline/commands/test_profile.py | 18 ++++++++++++----- 3 files changed, 46 insertions(+), 14 deletions(-) diff --git a/src/aiida/brokers/rabbitmq/defaults.py b/src/aiida/brokers/rabbitmq/defaults.py index aeeaab578d..b312897f73 100644 --- a/src/aiida/brokers/rabbitmq/defaults.py +++ b/src/aiida/brokers/rabbitmq/defaults.py @@ -29,7 +29,15 @@ ) -def detect_rabbitmq_config() -> dict[str, t.Any] | None: +def detect_rabbitmq_config( + protocol: str | None = None, + username: str | None = None, + password: str | None = None, + host: str | None = None, + port: int | None = None, + virtual_host: str | None = None, + heartbeat: int | None = None, +) -> dict[str, t.Any] | None: """Try to connect to a RabbitMQ server with the default connection parameters. :returns: The connection parameters if the RabbitMQ server was successfully connected to, or ``None`` otherwise. @@ -37,13 +45,13 @@ def detect_rabbitmq_config() -> dict[str, t.Any] | None: from kiwipy.rmq.threadcomms import connect connection_params = { - 'protocol': os.getenv('AIIDA_BROKER_PROTOCOL', BROKER_DEFAULTS['protocol']), - 'username': os.getenv('AIIDA_BROKER_USERNAME', BROKER_DEFAULTS['username']), - 'password': os.getenv('AIIDA_BROKER_PASSWORD', BROKER_DEFAULTS['password']), - 'host': os.getenv('AIIDA_BROKER_HOST', BROKER_DEFAULTS['host']), - 'port': os.getenv('AIIDA_BROKER_PORT', BROKER_DEFAULTS['port']), - 'virtual_host': os.getenv('AIIDA_BROKER_VIRTUAL_HOST', BROKER_DEFAULTS['virtual_host']), - 'heartbeat': os.getenv('AIIDA_BROKER_HEARTBEAT', BROKER_DEFAULTS['heartbeat']), + 'protocol': protocol or os.getenv('AIIDA_BROKER_PROTOCOL', BROKER_DEFAULTS['protocol']), + 'username': username or os.getenv('AIIDA_BROKER_USERNAME', BROKER_DEFAULTS['username']), + 'password': password or os.getenv('AIIDA_BROKER_PASSWORD', BROKER_DEFAULTS['password']), + 'host': host or os.getenv('AIIDA_BROKER_HOST', BROKER_DEFAULTS['host']), + 'port': port or int(os.getenv('AIIDA_BROKER_PORT', BROKER_DEFAULTS['port'])), + 'virtual_host': virtual_host or os.getenv('AIIDA_BROKER_VIRTUAL_HOST', BROKER_DEFAULTS['virtual_host']), + 'heartbeat': heartbeat or int(os.getenv('AIIDA_BROKER_HEARTBEAT', BROKER_DEFAULTS['heartbeat'])), } LOGGER.info(f'Attempting to connect to RabbitMQ with parameters: {connection_params}') diff --git a/src/aiida/cmdline/commands/cmd_profile.py b/src/aiida/cmdline/commands/cmd_profile.py index 55d59f706e..047126a38c 100644 --- a/src/aiida/cmdline/commands/cmd_profile.py +++ b/src/aiida/cmdline/commands/cmd_profile.py @@ -130,6 +130,7 @@ def profile_setup(): @verdi_profile.command('configure-rabbitmq') # type: ignore[arg-type] @arguments.PROFILE(default=defaults.get_default_profile) +@options.FORCE() @setup.SETUP_BROKER_PROTOCOL() @setup.SETUP_BROKER_USERNAME() @setup.SETUP_BROKER_PASSWORD() @@ -138,15 +139,30 @@ def profile_setup(): @setup.SETUP_BROKER_VIRTUAL_HOST() @options.NON_INTERACTIVE(default=True, show_default='--non-interactive') @click.pass_context -def profile_configure_rabbitmq(ctx, profile, **kwargs): +def profile_configure_rabbitmq(ctx, profile, non_interactive, force, **kwargs): """Configure RabbitMQ for a profile. Enable RabbitMQ for a profile that was created without a broker, or reconfigure existing connection details. 
""" + from aiida.brokers.rabbitmq.defaults import detect_rabbitmq_config + + connection_params = {key.lstrip('broker_'): value for key, value in kwargs.items() if key.startswith('broker_')} + + broker_config = detect_rabbitmq_config(**connection_params) + + if broker_config is None: + echo.echo_warning(f'Unable to connect to RabbitMQ server with configuration: {connection_params}') + if not force: + click.confirm('Do you want to continue with the provided configuration?', abort=True) + else: + echo.echo_success('Connected to RabbitMQ with the provided connection parameters') + profile.set_process_controller(name='core.rabbitmq', config=kwargs) ctx.obj.config.update_profile(profile) ctx.obj.config.store() + echo.echo_success(f'RabbitMQ configuration for `{profile.name}` updated to: {connection_params}') + @verdi_profile.command('list') def profile_list(): diff --git a/tests/cmdline/commands/test_profile.py b/tests/cmdline/commands/test_profile.py index 9718bd06f5..781a8b3cfa 100644 --- a/tests/cmdline/commands/test_profile.py +++ b/tests/cmdline/commands/test_profile.py @@ -284,8 +284,9 @@ def test_configure_rabbitmq(run_cli_command, isolated_config): # Now run the command to configure the broker options = [profile_name, '-n'] - run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=False) + cli_result = run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=False) assert profile.process_control_backend == 'core.rabbitmq' + assert 'Connected to RabbitMQ with the provided connection parameters' in cli_result.stdout # Verify that running in non-interactive mode is the default options = [ @@ -293,9 +294,16 @@ def test_configure_rabbitmq(run_cli_command, isolated_config): ] run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=True) assert profile.process_control_backend == 'core.rabbitmq' + assert 'Connected to RabbitMQ with the provided connection parameters' in cli_result.stdout + + # Verify that configuring with incorrect options and `--force` raises a warning but still configures the broker + options = [profile_name, '-f', '--broker-port', '1234'] + cli_result = run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=False) + assert 'Unable to connect to RabbitMQ server with configuration:' in cli_result.stdout + assert profile.process_control_config['broker_port'] == 1234 # Call it again to check it works to reconfigure existing broker connection parameters - options = [profile_name, '-n', '--broker-host', 'rabbitmq.broker.com'] - run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=False) - assert profile.process_control_backend == 'core.rabbitmq' - assert profile.process_control_config['broker_host'] == 'rabbitmq.broker.com' + options = [profile_name, '-n', '--broker-port', '5672'] + cli_result = run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=False) + assert 'Connected to RabbitMQ with the provided connection parameters' in cli_result.stdout + assert profile.process_control_config['broker_port'] == 5672 From 6db2f4060d4ece4552f5fe757c0f7d938810f4d1 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Mon, 24 Jun 2024 10:24:13 +0200 Subject: [PATCH 07/42] Dependencies: Update `tabulate>=0.8.0,<0.10.0` (#6472) --- environment.yml | 2 +- pyproject.toml | 2 +- requirements/requirements-py-3.10.txt | 2 +- requirements/requirements-py-3.11.txt | 2 +- requirements/requirements-py-3.12.txt | 2 +- requirements/requirements-py-3.9.txt | 2 
+-
 6 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/environment.yml b/environment.yml
index ba2bff4c93..98dd997ba1 100644
--- a/environment.yml
+++ b/environment.yml
@@ -32,7 +32,7 @@ dependencies:
 - pyyaml~=6.0
 - requests~=2.0
 - sqlalchemy~=2.0
-- tabulate~=0.8.5
+- tabulate<0.10.0,>=0.8.0
 - tqdm~=4.45
 - upf_to_json~=0.9.2
 - wrapt~=1.11
diff --git a/pyproject.toml b/pyproject.toml
index 85b5795a99..c70c7a96de 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -44,7 +44,7 @@ dependencies = [
     'pyyaml~=6.0',
     'requests~=2.0',
     'sqlalchemy~=2.0',
-    'tabulate~=0.8.5',
+    'tabulate>=0.8.0,<0.10.0',
     'tqdm~=4.45',
     'upf_to_json~=0.9.2',
     'wrapt~=1.11'
diff --git a/requirements/requirements-py-3.10.txt b/requirements/requirements-py-3.10.txt
index f8c0903e2d..3955a57530 100644
--- a/requirements/requirements-py-3.10.txt
+++ b/requirements/requirements-py-3.10.txt
@@ -190,7 +190,7 @@ sphinxext-rediraffe==0.2.7
 sqlalchemy==2.0.23
 stack-data==0.6.2
 sympy==1.12
-tabulate==0.8.10
+tabulate==0.9.0
 tenacity==8.2.2
 terminado==0.17.1
 textual==0.29.0
diff --git a/requirements/requirements-py-3.11.txt b/requirements/requirements-py-3.11.txt
index 1de3c788d7..feedaae17a 100644
--- a/requirements/requirements-py-3.11.txt
+++ b/requirements/requirements-py-3.11.txt
@@ -189,7 +189,7 @@ sphinxext-rediraffe==0.2.7
 sqlalchemy==2.0.23
 stack-data==0.6.2
 sympy==1.12
-tabulate==0.8.10
+tabulate==0.9.0
 tenacity==8.2.2
 terminado==0.17.1
 textual==0.29.0
diff --git a/requirements/requirements-py-3.12.txt b/requirements/requirements-py-3.12.txt
index 86d44d4c36..3246ddc471 100644
--- a/requirements/requirements-py-3.12.txt
+++ b/requirements/requirements-py-3.12.txt
@@ -187,7 +187,7 @@ sqlalchemy==2.0.23
 sqlalchemy-utils==0.37.9
 stack-data==0.6.3
 sympy==1.12
-tabulate==0.8.10
+tabulate==0.9.0
 tenacity==8.2.3
 terminado==0.17.1
 tinycss2==1.2.1
diff --git a/requirements/requirements-py-3.9.txt b/requirements/requirements-py-3.9.txt
index 69ffaf4f80..5b0d89b5bc 100644
--- a/requirements/requirements-py-3.9.txt
+++ b/requirements/requirements-py-3.9.txt
@@ -192,7 +192,7 @@ sphinxext-rediraffe==0.2.7
 sqlalchemy==2.0.23
 stack-data==0.6.2
 sympy==1.12
-tabulate==0.8.10
+tabulate==0.9.0
 tenacity==8.2.2
 terminado==0.17.1
 textual==0.29.0

From 63160995d6078051f0f5e524b6fab9aabb2747ed Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber
Date: Fri, 28 Jun 2024 10:24:28 +0200
Subject: [PATCH 08/42] CLI: Remove the RabbitMQ options from `verdi profile setup` (#6480)

For the vast majority of use cases, users will have a default setup for RabbitMQ, so the default configuration will be adequate and they will not need the options in the command. On the flip side, showing the options by default can make the command harder to use, as users will pause to think about what value to pass.

Since the `verdi profile configure-rabbitmq` command now allows configuring or reconfiguring the RabbitMQ connection parameters for an existing profile, it is fine to remove these options from the profile setup. Advanced users that need to customize the connection parameters can resort to that separate command.
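The diff below also reworks `detect_rabbitmq_config` to raise a `ConnectionError` rather than return ``None``. For reference, the parameter resolution it performs follows a simple precedence, sketched here for a single parameter (the default value is illustrative):

```python
from __future__ import annotations

import os

BROKER_DEFAULTS = {'host': '127.0.0.1'}  # illustrative default, mirroring the pattern in defaults.py


def resolve_host(host: str | None = None) -> str:
    # Explicit argument wins, then the environment variable, then the packaged default
    return host or os.getenv('AIIDA_BROKER_HOST', BROKER_DEFAULTS['host'])


assert resolve_host('rmq.example.com') == 'rmq.example.com'
os.environ['AIIDA_BROKER_HOST'] = 'env.example.com'
assert resolve_host() == 'env.example.com'
del os.environ['AIIDA_BROKER_HOST']
assert resolve_host() == '127.0.0.1'
```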
--- src/aiida/brokers/rabbitmq/defaults.py | 7 +- src/aiida/cmdline/commands/cmd_presto.py | 40 +++++++--- src/aiida/cmdline/commands/cmd_profile.py | 90 +++++++++++------------ tests/cmdline/commands/test_presto.py | 5 +- tests/cmdline/commands/test_profile.py | 2 +- 5 files changed, 78 insertions(+), 66 deletions(-) diff --git a/src/aiida/brokers/rabbitmq/defaults.py b/src/aiida/brokers/rabbitmq/defaults.py index b312897f73..21a15f1ad0 100644 --- a/src/aiida/brokers/rabbitmq/defaults.py +++ b/src/aiida/brokers/rabbitmq/defaults.py @@ -36,10 +36,10 @@ def detect_rabbitmq_config( host: str | None = None, port: int | None = None, virtual_host: str | None = None, - heartbeat: int | None = None, -) -> dict[str, t.Any] | None: +) -> dict[str, t.Any]: """Try to connect to a RabbitMQ server with the default connection parameters. + :raises ConnectionError: If the connection failed with the provided connection parameters :returns: The connection parameters if the RabbitMQ server was successfully connected to, or ``None`` otherwise. """ from kiwipy.rmq.threadcomms import connect @@ -51,7 +51,6 @@ def detect_rabbitmq_config( 'host': host or os.getenv('AIIDA_BROKER_HOST', BROKER_DEFAULTS['host']), 'port': port or int(os.getenv('AIIDA_BROKER_PORT', BROKER_DEFAULTS['port'])), 'virtual_host': virtual_host or os.getenv('AIIDA_BROKER_VIRTUAL_HOST', BROKER_DEFAULTS['virtual_host']), - 'heartbeat': heartbeat or int(os.getenv('AIIDA_BROKER_HEARTBEAT', BROKER_DEFAULTS['heartbeat'])), } LOGGER.info(f'Attempting to connect to RabbitMQ with parameters: {connection_params}') @@ -59,7 +58,7 @@ def detect_rabbitmq_config( try: connect(connection_params=connection_params) except ConnectionError: - return None + raise ConnectionError(f'Failed to connect with following connection parameters: {connection_params}') # The profile configuration expects the keys of the broker config to be prefixed with ``broker_``. return {f'broker_{key}': value for key, value in connection_params.items()} diff --git a/src/aiida/cmdline/commands/cmd_presto.py b/src/aiida/cmdline/commands/cmd_presto.py index 1893a6d461..d0835b8956 100644 --- a/src/aiida/cmdline/commands/cmd_presto.py +++ b/src/aiida/cmdline/commands/cmd_presto.py @@ -50,7 +50,13 @@ def detect_postgres_config( postgres_password: str, non_interactive: bool, ) -> dict[str, t.Any]: - """.""" + """Attempt to connect to the given PostgreSQL server and create a new user and database. + + :raises ConnectionError: If no connection could be established to the PostgreSQL server or a user and database + could not be created. + :returns: The connection configuration for the newly created user and database as can be used directly for the + storage configuration of the ``core.psql_dos`` storage plugin. 
+ """ import secrets from aiida.manage.configuration.settings import AIIDA_CONFIG_FOLDER @@ -65,7 +71,7 @@ def detect_postgres_config( postgres = Postgres(interactive=not non_interactive, quiet=False, dbinfo=dbinfo) if not postgres.is_connected: - echo.echo_critical(f'Failed to connect to the PostgreSQL server using parameters: {dbinfo}') + raise ConnectionError(f'Failed to connect to the PostgreSQL server using parameters: {dbinfo}') database_name = f'aiida-{profile_name}' database_username = f'aiida-{profile_name}' @@ -76,7 +82,7 @@ def detect_postgres_config( dbname=database_name, dbuser=database_username, dbpass=database_password ) except Exception as exception: - echo.echo_critical(f'Unable to automatically create the PostgreSQL user and database: {exception}') + raise ConnectionError(f'Unable to automatically create the PostgreSQL user and database: {exception}') return { 'database_hostname': postgres_hostname, @@ -175,23 +181,33 @@ def verdi_presto( 'postgres_password': postgres_password, 'non_interactive': non_interactive, } - storage_config: dict[str, t.Any] = detect_postgres_config(**postgres_config_kwargs) if use_postgres else {} - storage_backend = 'core.psql_dos' if storage_config else 'core.sqlite_dos' + + storage_backend: str = 'core.sqlite_dos' + storage_config: dict[str, t.Any] = {} if use_postgres: - echo.echo_report( - '`--use-postgres` enabled and database creation successful: configuring the profile to use PostgreSQL.' - ) + try: + storage_config = detect_postgres_config(**postgres_config_kwargs) + except ConnectionError as exception: + echo.echo_critical(str(exception)) + else: + echo.echo_report( + '`--use-postgres` enabled and database creation successful: configuring the profile to use PostgreSQL.' + ) + storage_backend = 'core.psql_dos' else: echo.echo_report('Option `--use-postgres` not enabled: configuring the profile to use SQLite.') - broker_config = detect_rabbitmq_config() - broker_backend = 'core.rabbitmq' if broker_config is not None else None + broker_backend = None + broker_config = None - if broker_config is None: - echo.echo_report('RabbitMQ server not found: configuring the profile without a broker.') + try: + broker_config = detect_rabbitmq_config() + except ConnectionError as exception: + echo.echo_report(f'RabbitMQ server not found ({exception}): configuring the profile without a broker.') else: echo.echo_report('RabbitMQ server detected: configuring the profile with a broker.') + broker_backend = 'core.rabbitmq' try: profile = create_profile( diff --git a/src/aiida/cmdline/commands/cmd_profile.py b/src/aiida/cmdline/commands/cmd_profile.py index 047126a38c..5e89b72d70 100644 --- a/src/aiida/cmdline/commands/cmd_profile.py +++ b/src/aiida/cmdline/commands/cmd_profile.py @@ -27,7 +27,17 @@ def verdi_profile(): def command_create_profile( - ctx: click.Context, storage_cls, non_interactive: bool, profile: Profile, set_as_default: bool = True, **kwargs + ctx: click.Context, + storage_cls, + non_interactive: bool, + profile: Profile, + set_as_default: bool = True, + email: str | None = None, + first_name: str | None = None, + last_name: str | None = None, + institution: str | None = None, + use_rabbitmq: bool = True, + **kwargs, ): """Create a new profile, initialise its storage and create a default user. @@ -37,43 +47,44 @@ def command_create_profile( :param profile: The profile instance. 
This is an empty ``Profile`` instance created by the command line argument which currently only contains the selected profile name for the profile that is to be created. :param set_as_default: Whether to set the created profile as the new default. + :param email: Email for the default user. + :param first_name: First name for the default user. + :param last_name: Last name for the default user. + :param institution: Institution for the default user. + :param use_rabbitmq: Whether to configure RabbitMQ as the broker. :param kwargs: Arguments to initialise instance of the selected storage implementation. """ + from aiida.brokers.rabbitmq.defaults import detect_rabbitmq_config from aiida.plugins.entry_point import get_entry_point_from_class - if not storage_cls.read_only and kwargs.get('email', None) is None: + if not storage_cls.read_only and email is None: raise click.BadParameter('The option is required for storages that are not read-only.', param_hint='--email') - email = kwargs.pop('email') - first_name = kwargs.pop('first_name') - last_name = kwargs.pop('last_name') - institution = kwargs.pop('institution') - _, storage_entry_point = get_entry_point_from_class(storage_cls.__module__, storage_cls.__name__) assert storage_entry_point is not None - if kwargs.pop('use_rabbitmq'): - broker_backend = 'core.rabbitmq' - broker_config = { - key: kwargs.get(key) - for key in ( - 'broker_protocol', - 'broker_username', - 'broker_password', - 'broker_host', - 'broker_port', - 'broker_virtual_host', - ) - } + broker_backend = None + broker_config = None + + if use_rabbitmq: + try: + broker_config = detect_rabbitmq_config() + except ConnectionError as exception: + echo.echo_warning(f'RabbitMQ server not reachable: {exception}.') + else: + echo.echo_success(f'RabbitMQ server detected with connection parameters: {broker_config}') + broker_backend = 'core.rabbitmq' + + echo.echo_report('RabbitMQ can be reconfigured with `verdi profile configure-rabbitmq`.') else: - broker_backend = None - broker_config = None + echo.echo_report('Creating profile without RabbitMQ.') + echo.echo_report('It can be configured at a later point in time with `verdi profile configure-rabbitmq`.') try: profile = create_profile( ctx.obj.config, name=profile.name, - email=email, + email=email, # type: ignore[arg-type] first_name=first_name, last_name=last_name, institution=institution, @@ -104,24 +115,6 @@ def command_create_profile( setup.SETUP_USER_LAST_NAME(), setup.SETUP_USER_INSTITUTION(), setup.SETUP_USE_RABBITMQ(), - setup.SETUP_BROKER_PROTOCOL( - prompt_fn=lambda ctx: ctx.params['use_rabbitmq'], required_fn=lambda ctx: ctx.params['use_rabbitmq'] - ), - setup.SETUP_BROKER_USERNAME( - prompt_fn=lambda ctx: ctx.params['use_rabbitmq'], required_fn=lambda ctx: ctx.params['use_rabbitmq'] - ), - setup.SETUP_BROKER_PASSWORD( - prompt_fn=lambda ctx: ctx.params['use_rabbitmq'], required_fn=lambda ctx: ctx.params['use_rabbitmq'] - ), - setup.SETUP_BROKER_HOST( - prompt_fn=lambda ctx: ctx.params['use_rabbitmq'], required_fn=lambda ctx: ctx.params['use_rabbitmq'] - ), - setup.SETUP_BROKER_PORT( - prompt_fn=lambda ctx: ctx.params['use_rabbitmq'], required_fn=lambda ctx: ctx.params['use_rabbitmq'] - ), - setup.SETUP_BROKER_VIRTUAL_HOST( - prompt_fn=lambda ctx: ctx.params['use_rabbitmq'], required_fn=lambda ctx: ctx.params['use_rabbitmq'] - ), ], ) def profile_setup(): @@ -146,22 +139,23 @@ def profile_configure_rabbitmq(ctx, profile, non_interactive, force, **kwargs): """ from aiida.brokers.rabbitmq.defaults import detect_rabbitmq_config 
-    connection_params = {key.lstrip('broker_'): value for key, value in kwargs.items() if key.startswith('broker_')}
-
-    broker_config = detect_rabbitmq_config(**connection_params)
+    broker_config = {key: value for key, value in kwargs.items() if key.startswith('broker_')}
+    connection_params = {key.lstrip('broker_'): value for key, value in broker_config.items()}

-    if broker_config is None:
-        echo.echo_warning(f'Unable to connect to RabbitMQ server with configuration: {connection_params}')
+    try:
+        broker_config = detect_rabbitmq_config(**connection_params)
+    except ConnectionError as exception:
+        echo.echo_warning(f'Unable to connect to RabbitMQ server: {exception}')
         if not force:
             click.confirm('Do you want to continue with the provided configuration?', abort=True)
     else:
         echo.echo_success('Connected to RabbitMQ with the provided connection parameters')

-    profile.set_process_controller(name='core.rabbitmq', config=kwargs)
+    profile.set_process_controller(name='core.rabbitmq', config=broker_config)
     ctx.obj.config.update_profile(profile)
     ctx.obj.config.store()

-    echo.echo_success(f'RabbitMQ configuration for `{profile.name}` updated to: {connection_params}')
+    echo.echo_success(f'RabbitMQ configuration for `{profile.name}` updated to: {broker_config}')

 @verdi_profile.command('list')
 def profile_list():
     """Display a list of all available profiles."""
diff --git a/tests/cmdline/commands/test_presto.py b/tests/cmdline/commands/test_presto.py
index 8651664f61..3ec1d1e5da 100644
--- a/tests/cmdline/commands/test_presto.py
+++ b/tests/cmdline/commands/test_presto.py
@@ -32,8 +32,11 @@ def test_presto_without_rmq(pytestconfig, run_cli_command, monkeypatch):
     """Test the ``verdi presto`` without RabbitMQ."""
     from aiida.brokers.rabbitmq import defaults

+    def detect_rabbitmq_config(**kwargs):
+        raise ConnectionError()
+
     # Patch the RabbitMQ detection function to pretend it could not find the service
-    monkeypatch.setattr(defaults, 'detect_rabbitmq_config', lambda: None)
+    monkeypatch.setattr(defaults, 'detect_rabbitmq_config', lambda: detect_rabbitmq_config())

     result = run_cli_command(verdi_presto, ['--non-interactive'])
     assert 'Created new profile `presto`.' in result.output
diff --git a/tests/cmdline/commands/test_profile.py b/tests/cmdline/commands/test_profile.py
index 781a8b3cfa..7f89cd2f31 100644
--- a/tests/cmdline/commands/test_profile.py
+++ b/tests/cmdline/commands/test_profile.py
@@ -299,7 +299,7 @@ def test_configure_rabbitmq(run_cli_command, isolated_config):
     # Verify that configuring with incorrect options and `--force` raises a warning but still configures the broker
     options = [profile_name, '-f', '--broker-port', '1234']
     cli_result = run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=False)
-    assert 'Unable to connect to RabbitMQ server with configuration:' in cli_result.stdout
+    assert 'Unable to connect to RabbitMQ server: Failed to connect' in cli_result.stdout
     assert profile.process_control_config['broker_port'] == 1234

From 8ea203cd9b1d2fbb4a3b38ba67beec97bb8c7145 Mon Sep 17 00:00:00 2001
From: Julian Geiger
Date: Fri, 28 Jun 2024 10:43:17 +0200
Subject: [PATCH 09/42] CLI: Change `--profile` to `-p/--profile-name` for `verdi profile setup` (#6481)

This is to be consistent with the naming of the option for `verdi presto`.
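A minimal, self-contained sketch of the click pattern at play, not the AiiDA code itself: the third positional declaration keeps the destination parameter named `profile`, so only the user-facing flag changes:

```python
import click


@click.command()
@click.option('-p', '--profile-name', 'profile', required=True, help='The name of the new profile.')
def setup(profile):
    click.echo(f'Setting up profile {profile!r}')


# e.g. `setup --profile-name archive` and `setup -p archive` both bind to ``profile``
```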
--- docs/source/howto/archive_profile.md | 2 +- docs/source/reference/command_line.rst | 2 +- docs/source/topics/storage.rst | 4 ++-- src/aiida/cmdline/commands/cmd_presto.py | 1 + src/aiida/cmdline/commands/cmd_profile.py | 2 +- src/aiida/cmdline/params/options/commands/setup.py | 11 +++++++++++ tests/cmdline/commands/test_profile.py | 12 ++++++------ 7 files changed, 23 insertions(+), 11 deletions(-) diff --git a/docs/source/howto/archive_profile.md b/docs/source/howto/archive_profile.md index 5a3e85cee0..d637f62d2b 100644 --- a/docs/source/howto/archive_profile.md +++ b/docs/source/howto/archive_profile.md @@ -25,7 +25,7 @@ See {ref}`how-to:share:archives` for information on how to create and migrate an The easiest way to inspect the contents of an archive is to create a profile that "mounts" the archive as its data storage: ```{code-cell} ipython3 -!verdi profile setup core.sqlite_zip -n --profile archive --filepath process.aiida +!verdi profile setup core.sqlite_zip -n --profile-name archive --filepath process.aiida ``` You can now inspect the contents of the `process.aiida` archive by using the `archive` profile in the same way you would a standard AiiDA profile. diff --git a/docs/source/reference/command_line.rst b/docs/source/reference/command_line.rst index b3edb33a0e..c3f3250c9c 100644 --- a/docs/source/reference/command_line.rst +++ b/docs/source/reference/command_line.rst @@ -329,7 +329,7 @@ Below is a list with all available subcommands. the newly created profile uses the new PostgreSQL database instead of SQLite. Options: - --profile-name TEXT Name of the profile. By default, a unique name starting + -p, --profile-name TEXT Name of the profile. By default, a unique name starting with `presto` is automatically generated. [default: (dynamic)] --email TEXT Email of the default user. [default: (dynamic)] diff --git a/docs/source/topics/storage.rst b/docs/source/topics/storage.rst index 7b358a91f9..59d6761360 100644 --- a/docs/source/topics/storage.rst +++ b/docs/source/topics/storage.rst @@ -141,7 +141,7 @@ A fully operational profile using this storage plugin can be created with a sing .. code-block:: console - verdi profile setup core.sqlite_dos -n --profile --email + verdi profile setup core.sqlite_dos -n --profile-name --email replacing ```` with the desired name for the profile and ```` with the email for the default user. @@ -167,7 +167,7 @@ However, since otherwise it functions like normal storage plugins, a profile can .. code-block:: console - verdi profile setup core.sqlite_zip -n --profile --filepath + verdi profile setup core.sqlite_zip -n --profile-name --filepath replacing ```` with the desired name for the profile and ```` the path to the archive file. The created profile can now be loaded like any other profile, and the contents of the provenance graph can be explored as usual. 
diff --git a/src/aiida/cmdline/commands/cmd_presto.py b/src/aiida/cmdline/commands/cmd_presto.py index d0835b8956..64a17fdac2 100644 --- a/src/aiida/cmdline/commands/cmd_presto.py +++ b/src/aiida/cmdline/commands/cmd_presto.py @@ -96,6 +96,7 @@ def detect_postgres_config( @verdi.command('presto') @click.option( + '-p', '--profile-name', default=lambda: get_default_presto_profile_name(), show_default=True, diff --git a/src/aiida/cmdline/commands/cmd_profile.py b/src/aiida/cmdline/commands/cmd_profile.py index 5e89b72d70..057f2de5a9 100644 --- a/src/aiida/cmdline/commands/cmd_profile.py +++ b/src/aiida/cmdline/commands/cmd_profile.py @@ -108,7 +108,7 @@ def command_create_profile( command=command_create_profile, entry_point_group='aiida.storage', shared_options=[ - setup.SETUP_PROFILE(), + setup.SETUP_PROFILE_NAME(), setup.SETUP_PROFILE_SET_AS_DEFAULT(), setup.SETUP_USER_EMAIL(required=False), setup.SETUP_USER_FIRST_NAME(), diff --git a/src/aiida/cmdline/params/options/commands/setup.py b/src/aiida/cmdline/params/options/commands/setup.py index 008f51b3a0..40df742d4e 100644 --- a/src/aiida/cmdline/params/options/commands/setup.py +++ b/src/aiida/cmdline/params/options/commands/setup.py @@ -181,6 +181,17 @@ def get_quicksetup_password(ctx, param, value): cls=options.interactive.InteractiveOption, ) +SETUP_PROFILE_NAME = options.OverridableOption( + '-p', + '--profile-name', + 'profile', + prompt='Profile name', + help='The name of the new profile.', + required=True, + type=types.ProfileParamType(cannot_exist=True), + cls=options.interactive.InteractiveOption, +) + SETUP_PROFILE_SET_AS_DEFAULT = options.OverridableOption( '--set-as-default/--no-set-as-default', prompt='Set as default?', diff --git a/tests/cmdline/commands/test_profile.py b/tests/cmdline/commands/test_profile.py index 7f89cd2f31..07b45c2818 100644 --- a/tests/cmdline/commands/test_profile.py +++ b/tests/cmdline/commands/test_profile.py @@ -173,7 +173,7 @@ def test_delete_storage(run_cli_command, isolated_config, tmp_path, entry_point) else: filepath = tmp_path / 'storage' - options = [entry_point, '-n', '--filepath', str(filepath), '--profile', profile_name, '--email', 'email@host'] + options = [entry_point, '-n', '--filepath', str(filepath), '--profile-name', profile_name, '--email', 'email@host'] result = run_cli_command(cmd_profile.profile_setup, options, use_subprocess=False) assert filepath.exists() assert profile_name in isolated_config.profile_names @@ -204,7 +204,7 @@ def test_setup(config_psql_dos, run_cli_command, isolated_config, tmp_path, entr options = ['--filepath', str(tmp_path)] profile_name = 'temp-profile' - options = [entry_point, '-n', '--profile', profile_name, '--email', 'email@host', *options] + options = [entry_point, '-n', '--profile-name', profile_name, '--email', 'email@host', *options] result = run_cli_command(cmd_profile.profile_setup, options, use_subprocess=False) assert f'Created new profile `{profile_name}`.' 
in result.output
     assert profile_name in isolated_config.profile_names

@@ -221,7 +221,7 @@ def test_setup_set_as_default(run_cli_command, isolated_config, tmp_path, set_as
         '-n',
         '--filepath',
         str(tmp_path),
-        '--profile',
+        '--profile-name',
         profile_name,
         '--email',
         'email@host',
@@ -247,7 +247,7 @@ def test_setup_email_required(run_cli_command, isolated_config, tmp_path, entry_

     isolated_config.unset_option('autofill.user.email')

-    options = [entry_point, '-n', '--filepath', str(tmp_path), '--profile', profile_name]
+    options = [entry_point, '-n', '--filepath', str(tmp_path), '--profile-name', profile_name]

     if storage_cls.read_only:
         result = run_cli_command(cmd_profile.profile_setup, options, use_subprocess=False)
@@ -261,7 +261,7 @@ def test_setup_no_use_rabbitmq(run_cli_command, isolated_config):
     """Test the ``--no-use-rabbitmq`` option."""
     profile_name = 'profile-no-broker'
-    options = ['core.sqlite_dos', '-n', '--email', 'a@a', '--profile', profile_name, '--no-use-rabbitmq']
+    options = ['core.sqlite_dos', '-n', '--email', 'a@a', '--profile-name', profile_name, '--no-use-rabbitmq']
     result = run_cli_command(cmd_profile.profile_setup, options, use_subprocess=False)
     assert f'Created new profile `{profile_name}`.' in result.output
@@ -276,7 +276,7 @@ def test_configure_rabbitmq(run_cli_command, isolated_config):
     profile_name = 'profile'

     # First setup a profile without a broker configured
-    options = ['core.sqlite_dos', '-n', '--email', 'a@a', '--profile', profile_name, '--no-use-rabbitmq']
+    options = ['core.sqlite_dos', '-n', '--email', 'a@a', '--profile-name', profile_name, '--no-use-rabbitmq']
     run_cli_command(cmd_profile.profile_setup, options, use_subprocess=False)
     profile = isolated_config.get_profile(profile_name)
     assert profile.process_control_backend is None

From 56995e1c4c7f0ebc87059b33564e84366c81c5ff Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber
Date: Fri, 28 Jun 2024 11:04:45 +0200
Subject: [PATCH 10/42] Engine: Improve error message when submitting without broker (#6465)

The `aiida.engine.launch.submit` method was just raising a vague `AssertionError` in case the runner did not have a communicator, which is the case if it was constructed without a communicator, which in turn happens for profiles that do not configure a broker. Since profiles without brokers are now supported and users are bound to try to submit anyway, the error message should be clearer.
---
 src/aiida/engine/launch.py | 10 +++++++++-
 tests/engine/test_launch.py | 34 +++++++++++++++++++++++++++-------
 2 files changed, 36 insertions(+), 8 deletions(-)

diff --git a/src/aiida/engine/launch.py b/src/aiida/engine/launch.py
index 013fd4d690..d37cf46905 100644
--- a/src/aiida/engine/launch.py
+++ b/src/aiida/engine/launch.py
@@ -111,8 +111,16 @@ def submit(
         raise InvalidOperation('Cannot use top-level `submit` from within another process, use `self.submit` instead')

     runner = manager.get_manager().get_runner()
+
+    if runner.controller is None:
+        raise InvalidOperation(
+            'Cannot submit because the runner does not have a process controller, probably because the profile does '
+            'not define a broker like RabbitMQ. If a RabbitMQ server is available, the profile can be configured to '
+            'use it with `verdi profile configure-rabbitmq`. Otherwise, use :meth:`aiida.engine.launch.run` instead to '
+            'run the process in the local Python interpreter instead of submitting it to the daemon.'
+        )
+
     assert runner.persister is not None, 'runner does not have a persister'
-    assert runner.controller is not None, 'runner does not have a controller'

     process_inited = instantiate_process(runner, process, **inputs)

diff --git a/tests/engine/test_launch.py b/tests/engine/test_launch.py
index fedd42cd98..92fba4bd6b 100644
--- a/tests/engine/test_launch.py
+++ b/tests/engine/test_launch.py
@@ -20,6 +20,16 @@
 ArithmeticAddCalculation = CalculationFactory('core.arithmetic.add')

+@pytest.fixture
+def arithmetic_add_builder(aiida_code_installed):
+    builder = ArithmeticAddCalculation.get_builder()
+    builder.code = aiida_code_installed(default_calc_job_plugin='core.arithmetic.add', filepath_executable='/bin/bash')
+    builder.x = orm.Int(1)
+    builder.y = orm.Int(1)
+    builder.metadata = {'options': {'resources': {'num_machines': 1, 'num_mpiprocs_per_machine': 1}}}
+    return builder
+
+
 @calcfunction
 def add(term_a, term_b):
     return term_a + term_b
@@ -69,18 +79,28 @@ def add(self):

 @pytest.mark.usefixtures('started_daemon_client')
-def test_submit_wait(aiida_code_installed):
+def test_submit_wait(arithmetic_add_builder):
     """Test the ``wait`` argument of :meth:`aiida.engine.launch.submit`."""
-    builder = ArithmeticAddCalculation.get_builder()
-    builder.code = aiida_code_installed(default_calc_job_plugin='core.arithmetic.add', filepath_executable='/bin/bash')
-    builder.x = orm.Int(1)
-    builder.y = orm.Int(1)
-    builder.metadata = {'options': {'resources': {'num_machines': 1, 'num_mpiprocs_per_machine': 1}}}
-    node = launch.submit(builder, wait=True, wait_interval=0.1)
+    node = launch.submit(arithmetic_add_builder, wait=True, wait_interval=0.1)
     assert node.is_finished, node.process_state
     assert node.is_finished_ok, node.exit_code

+def test_submit_no_broker(arithmetic_add_builder, monkeypatch, manager):
+    """Test that ``submit`` raises ``InvalidOperation`` if the runner does not have a controller.
+
+    The runner does not have a controller if the runner was not provided a communicator which is the case for profiles
+    that do not define a broker.
+    """
+    runner = manager.get_runner()
+    monkeypatch.setattr(runner, '_controller', None)
+
+    with pytest.raises(
+        exceptions.InvalidOperation, match=r'Cannot submit because the runner does not have a process controller.*'
+    ):
+        launch.submit(arithmetic_add_builder)
+
+
 def test_await_processes_invalid():
     """Test :func:`aiida.engine.launch.await_processes` for invalid inputs."""
     with pytest.raises(TypeError):

From 6a3a59b29ba64401828d9ab51dc123060868278b Mon Sep 17 00:00:00 2001
From: Alexander Goscinski
Date: Fri, 28 Jun 2024 11:07:32 +0200
Subject: [PATCH 11/42] Doc: Fixing several small issues (#6392)

The recursive workchain in the code snippets cannot actually run and is just confusing to have as an example for a user. It has been fixed by using a different name for the inner workchain.

In the classes `NodeCaching` and `ProcessNodeCaching` the `is_valid_cache` is a property. To not render it with method brackets, the `:attr:` sphinx directive is used instead of `:meth:`.
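For context, the corrected snippet pattern reads as follows once the diff below is applied (assembled here for readability; `SomeOtherWorkChain` stands in for any separately defined process class):

```python
from aiida.engine import WorkChain, append_
from aiida.plugins.factories import CalculationFactory

SomeOtherWorkChain = CalculationFactory('some.module')


class SomeWorkChain(WorkChain):
    @classmethod
    def define(cls, spec):
        super().define(spec)
        spec.outline(cls.submit_workchains, cls.inspect_workchains)

    def submit_workchains(self):
        # Submit a *different* workchain class instead of recursing into ourselves
        for _ in range(3):
            self.to_context(workchains=append_(self.submit(SomeOtherWorkChain)))

    def inspect_workchains(self):
        for workchain in self.ctx.workchains:
            assert workchain.is_finished_ok
```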
--- docs/source/internals/engine.rst | 4 ++-- docs/source/topics/provenance/caching.rst | 8 ++++---- .../snippets/workchains/run_workchain_submit_append.py | 5 ++++- .../snippets/workchains/run_workchain_submit_complete.py | 5 ++++- .../snippets/workchains/run_workchain_submit_parallel.py | 5 ++++- .../workchains/run_workchain_submit_parallel_nested.py | 5 ++++- 6 files changed, 22 insertions(+), 10 deletions(-) diff --git a/docs/source/internals/engine.rst b/docs/source/internals/engine.rst index 42652c3132..3e6f30360b 100644 --- a/docs/source/internals/engine.rst +++ b/docs/source/internals/engine.rst @@ -20,14 +20,14 @@ There are several methods which the internal classes of AiiDA use to control the On the level of the generic :class:`orm.Node ` class: -* The :meth:`~aiida.orm.nodes.caching.NodeCaching.is_valid_cache` property determines whether a particular node can be used as a cache. +* The :attr:`~aiida.orm.nodes.caching.NodeCaching.is_valid_cache` property determines whether a particular node can be used as a cache. This is used for example to disable caching from failed calculations. * Node classes have a ``_cachable`` attribute, which can be set to ``False`` to completely switch off caching for nodes of that class. This avoids performing queries for the hash altogether. On the level of the :class:`Process ` and :class:`orm.ProcessNode ` classes: -* The :meth:`ProcessNodeCaching.is_valid_cache ` calls :meth:`Process.is_valid_cache `, passing the node itself. +* The :attr:`ProcessNodeCaching.is_valid_cache ` calls :meth:`Process.is_valid_cache `, passing the node itself. This can be used in :class:`~aiida.engine.processes.process.Process` subclasses (e.g. in calculation plugins) to implement custom ways of invalidating the cache. * The :meth:`ProcessNodeCaching._hash_ignored_inputs ` attribute lists the inputs that should be ignored when creating the hash. This is checked by the :meth:`ProcessNodeCaching.get_objects_to_hash ` method. diff --git a/docs/source/topics/provenance/caching.rst b/docs/source/topics/provenance/caching.rst index 6a923d8931..b15a3cbeda 100644 --- a/docs/source/topics/provenance/caching.rst +++ b/docs/source/topics/provenance/caching.rst @@ -146,9 +146,9 @@ This method calls the iterator :meth:`~aiida.orm.nodes.caching.NodeCaching._iter To find the list of `source` nodes that are equivalent to the `target` that is being stored, :meth:`~aiida.orm.nodes.caching.NodeCaching._iter_all_same_nodes` performs the following steps: 1. It queries the database for all nodes that have the same hash as the `target` node. -2. From the result, only those nodes are returned where the property :meth:`~aiida.orm.nodes.caching.NodeCaching.is_valid_cache` returns ``True``. +2. From the result, only those nodes are returned where the property :attr:`~aiida.orm.nodes.caching.NodeCaching.is_valid_cache` returns ``True``. -The property :meth:`~aiida.orm.nodes.caching.NodeCaching.is_valid_cache` therefore allows to control whether a stored node can be used as a `source` in the caching mechanism. +The property :attr:`~aiida.orm.nodes.caching.NodeCaching.is_valid_cache` therefore allows to control whether a stored node can be used as a `source` in the caching mechanism. By default, for all nodes, the property returns ``True``. 
However, this can be changed on a per-node basis, by setting it to ``False`` @@ -166,8 +166,8 @@ Setting the property to ``False``, will cause an extra to be stored on the node Through this method, it is possible to guarantee that individual nodes are never used as a `source` for caching. -The :class:`~aiida.engine.processes.process.Process` class overrides the :meth:`~aiida.orm.nodes.caching.NodeCaching.is_valid_cache` property to give more fine-grained control on process nodes as caching sources. -If either :meth:`~aiida.orm.nodes.caching.NodeCaching.is_valid_cache` of the base class or :meth:`~aiida.orm.nodes.process.process.ProcessNode.is_finished` returns ``False``, the process node is not a valid source. +The :class:`~aiida.engine.processes.process.Process` class overrides the :attr:`~aiida.orm.nodes.caching.NodeCaching.is_valid_cache` property to give more fine-grained control on process nodes as caching sources. +If either :attr:`~aiida.orm.nodes.caching.NodeCaching.is_valid_cache` of the base class or :meth:`~aiida.orm.nodes.process.process.ProcessNode.is_finished` returns ``False``, the process node is not a valid source. Likewise, if the process class cannot be loaded from the node, through the :meth:`~aiida.orm.nodes.process.process.ProcessNode.process_class`, the node is not a valid caching source. Finally, if the associated process class implements the :meth:`~aiida.engine.processes.process.Process.is_valid_cache` method, it is called, passing the node as an argument. If that returns ``True``, the node is considered to be a valid caching source. diff --git a/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_append.py b/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_append.py index 07bb32a7e4..1073e7b59b 100644 --- a/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_append.py +++ b/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_append.py @@ -1,4 +1,7 @@ from aiida.engine import WorkChain, append_ +from aiida.plugins.factories import CalculationFactory + +SomeOtherWorkChain = CalculationFactory('some.module') class SomeWorkChain(WorkChain): @@ -12,7 +15,7 @@ def define(cls, spec): def submit_workchains(self): for i in range(3): - future = self.submit(SomeWorkChain) + future = self.submit(SomeOtherWorkChain) self.to_context(workchains=append_(future)) def inspect_workchains(self): diff --git a/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_complete.py b/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_complete.py index 8b7d2f5041..b325ab5c59 100644 --- a/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_complete.py +++ b/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_complete.py @@ -1,4 +1,7 @@ from aiida.engine import ToContext, WorkChain +from aiida.plugins.factories import CalculationFactory + +SomeOtherWorkChain = CalculationFactory('some.module') class SomeWorkChain(WorkChain): @@ -11,7 +14,7 @@ def define(cls, spec): ) def submit_workchain(self): - future = self.submit(SomeWorkChain) + future = self.submit(SomeOtherWorkChain) return ToContext(workchain=future) def inspect_workchain(self): diff --git a/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_parallel.py b/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_parallel.py index 1db43470f0..313c1b02e1 100644 --- 
a/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_parallel.py +++ b/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_parallel.py @@ -1,4 +1,7 @@ from aiida.engine import WorkChain +from aiida.plugins.factories import CalculationFactory + +SomeOtherWorkChain = CalculationFactory('some.module') class SomeWorkChain(WorkChain): @@ -12,7 +15,7 @@ def define(cls, spec): def submit_workchains(self): for i in range(3): - future = self.submit(SomeWorkChain) + future = self.submit(SomeOtherWorkChain) key = f'workchain_{i}' self.to_context(**{key: future}) diff --git a/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_parallel_nested.py b/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_parallel_nested.py index 7b42042c83..cf80bd02b5 100644 --- a/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_parallel_nested.py +++ b/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_parallel_nested.py @@ -1,4 +1,7 @@ from aiida.engine import WorkChain +from aiida.plugins.factories import CalculationFactory + +SomeOtherWorkChain = CalculationFactory('some.module') class SomeWorkChain(WorkChain): @@ -12,7 +15,7 @@ def define(cls, spec): def submit_workchains(self): for i in range(3): - future = self.submit(SomeWorkChain) + future = self.submit(SomeOtherWorkChain) key = f'workchains.sub{i}' self.to_context(**{key: future}) From 4cecda5177c456cee252c16295416c3842bb5d2d Mon Sep 17 00:00:00 2001 From: Daniel Hollas Date: Fri, 28 Jun 2024 10:11:06 +0100 Subject: [PATCH 12/42] Devops: Disable code coverage in `test-install.yml` (#6479) This should cut down the CI time by at least 10 minutes for these tests. --- .github/workflows/test-install.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-install.yml b/.github/workflows/test-install.yml index 0baab3c074..637b48a445 100644 --- a/.github/workflows/test-install.yml +++ b/.github/workflows/test-install.yml @@ -229,7 +229,7 @@ jobs: env: AIIDA_TEST_PROFILE: test_aiida AIIDA_WARN_v3: 1 - run: pytest --cov aiida --verbose tests -m 'not nightly' + run: pytest --verbose tests -m 'not nightly' - name: Freeze test environment run: pip freeze | sed '1d' | tee requirements-py-${{ matrix.python-version }}.txt From a6cf7fc7e02a48a7e3b9c4ba6ce5e2cd413e6b23 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Fri, 28 Jun 2024 13:21:47 +0200 Subject: [PATCH 13/42] Docs: Customize the color scheme through custom style sheet (#6456) Change the default coloring of the `pydata-sphinx-theme` to use the AiiDA primary colors. 
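
For reference, a custom style sheet like this is typically registered
with Sphinx through `html_css_files` in `conf.py` (a sketch; the exact
registration in this repository may differ):

    # docs/source/conf.py (sketch)
    html_static_path = ['_static']
    html_css_files = ['aiida-custom.css']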
--- docs/source/_static/aiida-custom.css | 107 +++++++++++++++++++++++++++ 1 file changed, 107 insertions(+) diff --git a/docs/source/_static/aiida-custom.css b/docs/source/_static/aiida-custom.css index 73fcf4945f..7283171f36 100644 --- a/docs/source/_static/aiida-custom.css +++ b/docs/source/_static/aiida-custom.css @@ -1,3 +1,110 @@ +/* AiiDA colors in HTML format +blue: #0096d2 +orange: #fe7d17 +green: #30b808 +*/ + +html[data-theme="light"] { + --pst-color-primary: #0096d2; + --pst-color-secondary: #fe7d17; + --pst-color-surface: #f5f5f5; +} + +html[data-theme="dark"] { + --pst-color-primary: #0096d2; + --pst-color-secondary: #fe7d17; +} + +code { + --pst-color-inline-code: #0096d2; + font-weight: bold; +} + +html[data-theme=light] .highlight .ch, +html[data-theme=light] .highlight .sd { + color: #777777; + font-style: italic +} + +html[data-theme=light] .highlight .s1, +html[data-theme=light] .highlight .si { + color: #30b808; + font-weight: bold; +} + +html[data-theme=light] .highlight .k, +html[data-theme=light] .highlight .kc, +html[data-theme=light] .highlight .kn, +html[data-theme=light] .highlight .ow, +html[data-theme=light] .highlight .mf, +html[data-theme=light] .highlight .mi { + color: #0096d2; + font-weight: bold; +} + +html[data-theme=dark] .highlight .ch, +html[data-theme=dark] .highlight .sd { + color: #999999; + font-style: italic +} + +html[data-theme=dark] .highlight .s1, +html[data-theme=dark] .highlight .si { + color: #30b808; + font-weight: bold; +} + +html[data-theme=dark] .highlight .k, +html[data-theme=dark] .highlight .kc, +html[data-theme=dark] .highlight .kn, +html[data-theme=dark] .highlight .ow, +html[data-theme=dark] .highlight .mf, +html[data-theme=dark] .highlight .mi { + color: #0096d2; + font-weight: bold; +} + +.sd-card-hover:hover { + border-color: var(--pst-color-primary); + transform: none; +} + +.aiida-green { + color: #30b808; +} + +.aiida-blue { + color: #0096d2; +} + +.aiida-orange { + color: #fe7d17; +} + +.aiida-red { + color: rgb(192, 11, 80); +} + +img.logo-shell { + width: 20px; + padding-bottom: 3px; + margin-right: 3px; +} + +.sd-card-footer { + padding-top: 0rem; + border-top: none !important; +} + +.sd-card-footer table { + margin-bottom: 0rem; + border-color: transparent; +} + +.sd-card-footer table td:last-child { + text-align: right; +} + /* Fix CSS of top bar link icons */ a.nav-link.nav-external i { padding-left: 0.3em !important; From 45a8b461a50b90c4df1ab720dff609f72d1a2487 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Fri, 28 Jun 2024 15:47:22 +0200 Subject: [PATCH 14/42] CLI: Fail early in `verdi presto` when profile name already exists (#6488) If an explicit profile name is specified with `-p/--profile-name` it should be validated as soon as possible and error if the profile already exists, before anything else is done. This prevents, for example, that a PostgreSQL user and database are created that are then not cleaned up. 
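
The expected behavior is illustrated by the following console session,
which mirrors the test added below (it assumes a profile named
`custom-presto` already exists):

    $ verdi presto --non-interactive --use-postgres --profile-name custom-presto
    Usage: presto [OPTIONS]
    Try 'presto --help' for help.

    Error: Invalid value for --profile-name: The profile `custom-presto` already exists.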
--- src/aiida/cmdline/commands/cmd_presto.py | 3 +++ tests/cmdline/commands/test_presto.py | 22 +++++++++++++++++++++- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/src/aiida/cmdline/commands/cmd_presto.py b/src/aiida/cmdline/commands/cmd_presto.py index 64a17fdac2..6fa9518443 100644 --- a/src/aiida/cmdline/commands/cmd_presto.py +++ b/src/aiida/cmdline/commands/cmd_presto.py @@ -174,6 +174,9 @@ def verdi_presto( from aiida.manage.configuration import create_profile, load_profile from aiida.orm import Computer + if profile_name in ctx.obj.config.profile_names: + raise click.BadParameter(f'The profile `{profile_name}` already exists.', param_hint='--profile-name') + postgres_config_kwargs = { 'profile_name': profile_name, 'postgres_hostname': postgres_hostname, diff --git a/tests/cmdline/commands/test_presto.py b/tests/cmdline/commands/test_presto.py index 3ec1d1e5da..80c61eaa4b 100644 --- a/tests/cmdline/commands/test_presto.py +++ b/tests/cmdline/commands/test_presto.py @@ -1,5 +1,7 @@ """Tests for ``verdi presto``.""" +import textwrap + import pytest from aiida.cmdline.commands.cmd_presto import get_default_presto_profile_name, verdi_presto from aiida.manage.configuration import profile_context @@ -50,7 +52,7 @@ def detect_rabbitmq_config(**kwargs): @pytest.mark.requires_rmq @pytest.mark.usefixtures('empty_config') -def test_presto_with_rmq(pytestconfig, run_cli_command, monkeypatch): +def test_presto_with_rmq(pytestconfig, run_cli_command): """Test the ``verdi presto``.""" result = run_cli_command(verdi_presto, ['--non-interactive']) assert 'Created new profile `presto`.' in result.output @@ -91,3 +93,21 @@ def test_presto_overdose(run_cli_command, config_with_profile_factory): config_with_profile_factory(name='presto-10') result = run_cli_command(verdi_presto) assert 'Created new profile `presto-11`.' in result.output + + +@pytest.mark.requires_psql +@pytest.mark.usefixtures('empty_config') +def test_presto_profile_name_exists(run_cli_command, config_with_profile_factory): + """Test ``verdi presto`` fails early if the specified profile name already exists.""" + profile_name = 'custom-presto' + config_with_profile_factory(name=profile_name) + options = ['--non-interactive', '--use-postgres', '--profile-name', profile_name] + result = run_cli_command(verdi_presto, options, raises=True) + # Matching for the complete literal output as a way to test that nothing else of the command was run, such as + # configuring the broker or creating a database for PostgreSQL + assert result.output == textwrap.dedent("""\ + Usage: presto [OPTIONS] + Try 'presto --help' for help. + + Error: Invalid value for --profile-name: The profile `custom-presto` already exists. + """) From 66a2dcedd0a9428b5b2218b8c82bad9c9aff4956 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Fri, 28 Jun 2024 16:48:00 +0200 Subject: [PATCH 15/42] CLI: Only configure logging in `set_log_level` callback once (#6493) All `verdi` commands automatically have the `-v/--verbosity` option added. This option has a callback `set_log_level` that is invoked for each subcommand. The callback is supposed to call `configure_logging` to setup the logging configuration. 
Besides being unnecessary, calling it multiple times for each
subcommand would actually cause a bug: once the profile storage had
been loaded (through the callback of the profile option),
`configure_logging` would have been called with `with_orm=True` to
make sure the `DbLogHandler` was properly configured. A subsequent
call to `set_log_level` would then call `configure_logging` with the
default values (where `with_orm=False`), and so the `DbLogHandler`
would be removed. This would result in process log messages not being
persisted in the database. The problem would manifest when running an
AiiDA process through a script invoked through `verdi`, or any other
CLI that uses the verbosity option provided by `aiida-core`.

Since `set_log_level` only has to make sure that the logging is
configured at least once, a guard is added to skip the configuration
once the `aiida.common.log.CLI_ACTIVE` global has been set by a
previous invocation.
---
 src/aiida/cmdline/params/options/main.py       |  7 +++++--
 tests/cmdline/params/options/test_verbosity.py | 15 +++++++++------
 2 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/src/aiida/cmdline/params/options/main.py b/src/aiida/cmdline/params/options/main.py
index aa86a1f0dd..f5eb2d551f 100644
--- a/src/aiida/cmdline/params/options/main.py
+++ b/src/aiida/cmdline/params/options/main.py
@@ -175,7 +175,7 @@ def decorator(command):
     return decorator
 
 
-def set_log_level(_ctx, _param, value):
+def set_log_level(ctx, _param, value):
     """Configure the logging for the CLI command being executed.
 
     Note that we cannot use the most obvious approach of directly setting the level on the various loggers. The reason
@@ -192,12 +192,15 @@
     """
     from aiida.common import log
 
+    if log.CLI_ACTIVE:
+        return value
+
     log.CLI_ACTIVE = True
 
     # If the value is ``None``, it means the option was not specified, but we still configure logging for the CLI
     # However, we skip this when we are in a tab-completion context.
     if value is None:
-        if not _ctx.resilient_parsing:
+        if not ctx.resilient_parsing:
             configure_logging()
         return None
 
diff --git a/tests/cmdline/params/options/test_verbosity.py b/tests/cmdline/params/options/test_verbosity.py
index 3544962b38..3573edd54f 100644
--- a/tests/cmdline/params/options/test_verbosity.py
+++ b/tests/cmdline/params/options/test_verbosity.py
@@ -14,7 +14,7 @@
 import pytest
 from aiida.cmdline.commands.cmd_verdi import verdi
 from aiida.cmdline.utils import echo
-from aiida.common.log import AIIDA_LOGGER, LOG_LEVELS
+from aiida.common import log
 
 
 @pytest.fixture
@@ -29,10 +29,10 @@ def cmd():
 
     The messages to ``verdi`` are performed indirectly through the utilities of the ``echo`` module.
     """
-    assert 'cli' in [handler.name for handler in AIIDA_LOGGER.handlers]
+    assert 'cli' in [handler.name for handler in log.AIIDA_LOGGER.handlers]
 
-    for log_level in LOG_LEVELS.values():
-        AIIDA_LOGGER.log(log_level, 'aiida')
+    for log_level in log.LOG_LEVELS.values():
+        log.AIIDA_LOGGER.log(log_level, 'aiida')
 
     echo.echo_debug('verdi')
     echo.echo_info('verdi')
@@ -49,7 +49,7 @@ def verify_log_output(output: str, log_level_aiida: int, log_level_verdi: int):
 
     :param log_level_aiida: The expected log level of the ``aiida`` logger.
     :param log_level_verdi: The expected log level of the ``verdi`` logger.
     """
-    for log_level_name, log_level in LOG_LEVELS.items():
+    for log_level_name, log_level in log.LOG_LEVELS.items():
         prefix = log_level_name.capitalize()
 
         if log_level >= log_level_aiida:
@@ -73,7 +73,7 @@ def test_default(run_cli_command):
     verify_log_output(result.output, logging.REPORT, logging.REPORT)
 
 
-@pytest.mark.parametrize('option_log_level', [level for level in LOG_LEVELS.values() if level != logging.NOTSET])
+@pytest.mark.parametrize('option_log_level', [level for level in log.LOG_LEVELS.values() if level != logging.NOTSET])
 @pytest.mark.usefixtures('reset_log_level')
 def test_explicit(run_cli_command, option_log_level):
     """Test explicitly setting a verbosity"""
@@ -92,6 +92,9 @@ def test_config_option_override(run_cli_command, isolated_config):
     result = run_cli_command(cmd, raises=True, use_subprocess=False)
     verify_log_output(result.output, logging.ERROR, logging.WARNING)
 
+    # Manually reset the ``aiida.common.log.CLI_ACTIVE`` global, otherwise the verbosity callback is a no-op
+    log.CLI_ACTIVE = None
+
     # If ``--verbosity`` is explicitly defined, it overrides both config options.
     result = run_cli_command(cmd, ['--verbosity', 'INFO'], raises=True, use_subprocess=False)
     verify_log_output(result.output, logging.INFO, logging.INFO)

From 0ee0a0c6ae13588e82edf1cf9e8cb9857c94c31b Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber
Date: Fri, 28 Jun 2024 23:01:36 +0200
Subject: [PATCH 16/42] Docs: Rework the installation section (#6455)

The `verdi setup` and `verdi quicksetup` commands have been deprecated
and replaced by `verdi profile setup` and `verdi presto`. The
installation docs were heavily outdated and the flow was scattered.

The biggest change is that there is now a "quick install guide" that
relies on `verdi presto` to provide an install route that is foolproof
and works on almost any system with a minimal number of commands.

Then there is the "complete installation guide" that provides all the
details necessary to fully customize an installation.
--- docs/source/conf.py | 6 - docs/source/howto/daemon.rst | 13 + docs/source/howto/index.rst | 1 + docs/source/howto/installation.rst | 10 +- docs/source/howto/interact.rst | 22 +- docs/source/howto/ssh.rst | 2 +- docs/source/index.rst | 45 +- docs/source/installation/docker.rst | 181 ++++++++ docs/source/installation/guide_complete.rst | 408 ++++++++++++++++++ docs/source/installation/guide_quick.rst | 85 ++++ docs/source/installation/index.rst | 97 +++++ .../troubleshooting.rst | 14 +- docs/source/intro/cheatsheet.rst | 16 - docs/source/intro/get_started.rst | 98 ----- docs/source/intro/index.rst | 23 +- docs/source/intro/install_conda.rst | 163 ------- docs/source/intro/install_system.rst | 298 ------------- docs/source/intro/installation.rst | 295 ------------- docs/source/intro/run_docker.rst | 236 ---------- docs/source/redirects.txt | 10 +- docs/source/reference/cheatsheet.rst | 16 + .../cheatsheet}/cheatsheet.png | Bin .../cheatsheet}/cheatsheet.svg | 0 .../cheatsheet}/cheatsheet_v.pdf | Bin docs/source/reference/index.rst | 1 + docs/source/topics/storage.rst | 4 +- docs/source/tutorials/basic.md | 2 +- 27 files changed, 879 insertions(+), 1167 deletions(-) create mode 100644 docs/source/howto/daemon.rst create mode 100644 docs/source/installation/docker.rst create mode 100644 docs/source/installation/guide_complete.rst create mode 100644 docs/source/installation/guide_quick.rst create mode 100644 docs/source/installation/index.rst rename docs/source/{intro => installation}/troubleshooting.rst (97%) delete mode 100644 docs/source/intro/cheatsheet.rst delete mode 100644 docs/source/intro/get_started.rst delete mode 100644 docs/source/intro/install_conda.rst delete mode 100644 docs/source/intro/install_system.rst delete mode 100644 docs/source/intro/installation.rst delete mode 100644 docs/source/intro/run_docker.rst create mode 100644 docs/source/reference/cheatsheet.rst rename docs/source/{intro/_cheatsheet => reference/cheatsheet}/cheatsheet.png (100%) rename docs/source/{intro/_cheatsheet => reference/cheatsheet}/cheatsheet.svg (100%) rename docs/source/{intro/_cheatsheet => reference/cheatsheet}/cheatsheet_v.pdf (100%) diff --git a/docs/source/conf.py b/docs/source/conf.py index 9cfaae4847..745ed79c30 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -49,15 +49,9 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [ - 'datatypes/**', 'developer_guide/**', - 'get_started/**', - 'howto/installation_more/index.rst', 'import_export/**', 'internals/global_design.rst', - 'internals/orm.rst', - 'scheduler/index.rst', - 'working_with_aiida/**', ] # The name of the Pygments (syntax highlighting) style to use. diff --git a/docs/source/howto/daemon.rst b/docs/source/howto/daemon.rst new file mode 100644 index 0000000000..3076547206 --- /dev/null +++ b/docs/source/howto/daemon.rst @@ -0,0 +1,13 @@ + +.. _how-to:manage-daemon: + +How to manage the daemon +------------------------ + +The AiiDA daemon process runs in the background and takes care of processing your submitted calculations and workflows, checking their status, retrieving their results once they are finished and storing them in the AiiDA database. 
+ +The AiiDA daemon is controlled using three simple commands: + +* ``verdi daemon start``: start the daemon +* ``verdi daemon status``: check the status of the daemon +* ``verdi daemon stop``: stop the daemon diff --git a/docs/source/howto/index.rst b/docs/source/howto/index.rst index 28fd41d2df..b405bc3f8a 100644 --- a/docs/source/howto/index.rst +++ b/docs/source/howto/index.rst @@ -6,6 +6,7 @@ How-To Guides :maxdepth: 1 interact + daemon plugins_install run_codes run_workflows diff --git a/docs/source/howto/installation.rst b/docs/source/howto/installation.rst index a5dc6c00a1..50547b8f21 100644 --- a/docs/source/howto/installation.rst +++ b/docs/source/howto/installation.rst @@ -14,7 +14,7 @@ Creating profiles ----------------- Each AiiDA installation can have multiple profiles, each of which can have its own individual database and file repository to store the contents of the :ref:`provenance graph`. Profiles allow you to run multiple projects completely independently from one another with just a single AiiDA installation and at least one profile is required to run AiiDA. -A new profile can be created using :ref:`verdi quicksetup` or :ref:`verdi setup`, which works similar to the former but gives more control to the user. +A new profile can be created using :ref:`verdi presto` or :ref:`verdi profile setup`, which works similar to the former but gives more control to the user. Listing profiles ---------------- @@ -296,7 +296,7 @@ Isolating multiple instances An AiiDA instance is defined as the installed source code plus the configuration folder that stores the configuration files with all the configured profiles. It is possible to run multiple AiiDA instances on a single machine, simply by isolating the code and configuration in a virtual environment. -To isolate the code, make sure to install AiiDA into a virtual environment, e.g., with conda or venv, as described :ref:`here `. +To isolate the code, make sure to install AiiDA into a virtual environment, e.g., with conda or venv. Whenever you activate this particular environment, you will be running the particular version of AiiDA (and all the plugins) that you installed specifically for it. This is separate from the configuration of AiiDA, which is stored in the configuration directory which is always named ``.aiida`` and by default is stored in the home directory. @@ -619,12 +619,12 @@ Alternatively to the CLI command, one can also manually create a backup. This re .. _how-to:installation:backup:restore: Restoring data from a backup -================================== +============================ Restoring a backed up AiiDA profile requires: * restoring the profile information in the AiiDA ``config.json`` file. Simply copy the`profiles` entry from - the backed up `config.json`to the one of the running AiiDA instance (see `verdi status` for exact location). + the backed up ``config.json`` to the one of the running AiiDA instance (see ``verdi status`` for exact location). Some information (e.g. the database parameters) might need to be updated. * restoring the data of of the backed up profile according to the ``config.json`` entry. @@ -642,7 +642,7 @@ To test if the restoration worked, run ``verdi -p status`` to ver **PostgreSQL database** - To restore the PostgreSQL database from the ``db.psql`` file that was backed up, first you should create an empty database following the instructions described in :ref:`database ` skipping the ``verdi setup`` phase. 
+ To restore the PostgreSQL database from the ``db.psql`` file that was backed up, first you should create an empty database following the instructions described in :ref:`the installation guide `. The backed up data can then be imported by calling: .. code-block:: console diff --git a/docs/source/howto/interact.rst b/docs/source/howto/interact.rst index 530bcef915..ab6c8c6d85 100644 --- a/docs/source/howto/interact.rst +++ b/docs/source/howto/interact.rst @@ -125,6 +125,18 @@ Interactive notebooks ===================== Similar to :ref:`interactive shells `, AiiDA is also directly compatbile with interactive Python notebooks, such as `Jupyter `_. +To install the required Python packages, install ``aiida-core`` with the ``notebook`` extra, e.g. run: + +.. code-block:: console + + pip install aiida-core[notebook] + +You should now be able to start a Jupyter notebook server: + +.. code-block:: console + + jupyter notebook + To use AiiDA's Python API in a notebook, first a profile has to be loaded: .. code-block:: ipython @@ -142,8 +154,14 @@ The same can be accomplished using the following magic statement: %load_ext aiida %aiida -This magic line will replicate the same environment as :ref:`the interactive shell ` provided by ``verdi shell``. -However, it does require some one-time installation, as detailed in the section on how to :ref:`intro:install:jupyter`. +This magic line replicates the same environment as :ref:`the interactive shell ` provided by ``verdi shell``. + +It is also possible to run ``verdi`` commands inside the notebook, for example: + +.. code-block:: ipython + + %verdi status + .. _how-to:interact-restapi: diff --git a/docs/source/howto/ssh.rst b/docs/source/howto/ssh.rst index 9a2e24a366..59fe9093b6 100644 --- a/docs/source/howto/ssh.rst +++ b/docs/source/howto/ssh.rst @@ -269,6 +269,6 @@ Using kerberos tokens If the remote machine requires authentication through a Kerberos token (that you need to obtain before using ssh), you typically need to * install ``libffi`` (``sudo apt-get install libffi-dev`` under Ubuntu) -* install the ``ssh_kerberos`` extra during the installation of aiida-core (see :ref:`intro:install:setup`). +* install the ``ssh_kerberos`` extra during the installation of aiida-core (see :ref:`installation:guide-complete:python-package:optional-requirements`). If you provide all necessary ``GSSAPI`` options in your ``~/.ssh/config`` file, ``verdi computer configure`` should already pick up the appropriate values for all the gss-related options. diff --git a/docs/source/index.rst b/docs/source/index.rst index b9a218b023..e4c9d7b94d 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -1,9 +1,5 @@ :sd_hide_title: -################################# -Welcome to AiiDA's documentation! -################################# - .. grid:: :reverse: :gutter: 2 3 3 3 @@ -23,7 +19,8 @@ Welcome to AiiDA's documentation! .. rubric:: AiiDA - An open-source Python infrastructure to help researchers with automating, managing, persisting, sharing and reproducing the complex workflows associated with modern computational science and all associated data (see :ref:`features`). + An open-source Python infrastructure to help researchers with automating, managing, persisting, sharing and + reproducing the complex workflows associated with modern computational science and all associated data (see :ref:`features`). **aiida-core version:** |release| @@ -32,15 +29,32 @@ Welcome to AiiDA's documentation! .. grid:: 1 2 2 2 :gutter: 3 - .. 
grid-item-card:: :fa:`rocket;mr-1` Getting Started + .. grid-item-card:: :fa:`circle-play;mr-1` Introduction + :text-align: center + :shadow: md + + Overview of what AiiDA is and what it can do. + + +++++++++++++++++++++++++++++++++++++++++++++ + + .. button-ref:: intro/index + :ref-type: doc + :click-parent: + :expand: + :color: primary + :outline: + + To the introduction + + .. grid-item-card:: :fa:`rocket;mr-1` Installation :text-align: center :shadow: md - AiiDA installation, configuration and troubleshooting. + Installation guides and troubleshooting. +++++++++++++++++++++++++++++++++++++++++++++ - .. button-ref:: intro/get_started + .. button-ref:: installation/index :ref-type: doc :click-parent: :expand: @@ -87,7 +101,7 @@ Welcome to AiiDA's documentation! :text-align: center :shadow: md - Background information on AiiDA's underlying concepts. + Background information on AiiDA concepts. +++++++++++++++++++++++++++++++++++++++++++++ @@ -104,7 +118,7 @@ Welcome to AiiDA's documentation! :text-align: center :shadow: md - Comprehensive documentation of AiiDA components: command-line interface, Python interface, and RESTful API. + Comprehensive documentation of CLI, Python API and REST API. +++++++++++++++++++++++++++++++++++++++++++++ @@ -121,7 +135,7 @@ Welcome to AiiDA's documentation! :text-align: center :shadow: md - Notes on AiiDA's design and architecture aimed at core developers. + Detailed information on AiiDA's design and architecture. +++++++++++++++++++++++++++++++++++++++++++++ @@ -154,15 +168,16 @@ Welcome to AiiDA's documentation! :hidden: intro/index + installation/index tutorials/index howto/index topics/index reference/index internals/index -*********** +=========== How to cite -*********** +=========== If you use AiiDA for your research, please cite the following work: @@ -175,9 +190,9 @@ If the ADES concepts are referenced, please also cite: .. highlights:: Pizzi, Giovanni, Andrea Cepellotti, Riccardo Sabatini, Nicola Marzari, and Boris Kozinsky. *AiiDA: automated interactive infrastructure and database for computational science*, Computational Materials Science **111**, 218-230 (2016); DOI: `10.1016/j.commatsci.2015.09.013 `_ -**************** +================ Acknowledgements -**************** +================ AiiDA is supported by the `MARVEL National Centre of Competence in Research`_, the `MaX European Centre of Excellence`_ and by a number of other supporting projects, partners and institutions, whose complete list is available on the `AiiDA website acknowledgements page`_. diff --git a/docs/source/installation/docker.rst b/docs/source/installation/docker.rst new file mode 100644 index 0000000000..d1a2a2f22e --- /dev/null +++ b/docs/source/installation/docker.rst @@ -0,0 +1,181 @@ +.. _installation:docker: + +====== +Docker +====== + +The AiiDA team maintains a number of `Docker `_ images on `Docker Hub `_. +These images contain a fully pre-configured AiiDA environment which make it easy to get started using AiiDA if you are familiar with Docker. + +Currently, there are three image variants: + +.. grid:: auto + :gutter: 3 + + .. grid-item-card:: :fa:`bullseye;mr-1` aiida-core-base + :text-align: center + :shadow: md + + This is the base image. + It comes just with the ``aiida-core`` package installed. + It expects that the RabbitMQ and PostgreSQL services are provided. + + + .. 
grid-item-card:: :fa:`puzzle-piece;mr-1` aiida-core-with-services
+        :text-align: center
+        :shadow: md
+
+        This image builds on top of ``aiida-core-base`` but also installs RabbitMQ and PostgreSQL as services inside the image.
+        This image is therefore complete and ready to be used.
+
+
+    .. grid-item-card:: :fa:`code;mr-1` aiida-core-dev
+        :text-align: center
+        :shadow: md
+
+        This image builds on top of ``aiida-core-with-services`` with the only difference that the ``aiida-core`` package is installed from source in editable mode.
+        This makes this image suitable for development of the ``aiida-core`` package.
+
+
+Start a container
+=================
+
+To start a container from an image, run:
+
+.. code-block:: console
+
+    docker run -it --name aiida aiidateam/aiida-core-with-services:latest bash
+
+In this example, the ``aiida-core-with-services`` image is started, where ``latest`` refers to the latest tag.
+The ``--name`` option is optional but is recommended, as it makes it easier to restart the same container at a later point in time.
+The ``-it`` option is used to run the container in interactive mode and to allocate a pseudo-TTY.
+After the container startup has finished, a bash shell inside the container is opened.
+
+An AiiDA profile is automatically created when the container is started.
+By default, the profile is created using the ``core.psql_dos`` storage plugin and a default user is created.
+See the section on :ref:`container configuration ` for how to customize certain parts of this setup.
+
+To confirm that everything is up and running as required, run:
+
+.. code-block:: console
+
+    verdi status
+
+which should show something like::
+
+    ✔ version: AiiDA v2.5.1
+    ✔ config: /home/aiida/.aiida
+    ✔ profile: default
+    ✔ storage: Storage for 'default' [open] @ postgresql://aiida:***@localhost:5432
+    ✔ rabbitmq: Connected to RabbitMQ v3.10.18 as amqp://guest:guest@127.0.0.1:5672
+    ✔ daemon: Daemon is running with PID 324
+
+If all checks show green check marks, the container is ready to go.
+The container can be shut down by typing ``exit`` or pressing ``CTRL+d``.
+The container can be restarted at a later time; see :ref:`restarting a container ` for details.
+Any data that was created in a previous session is still available.
+
+.. caution::
+
+    When the container is not just stopped but *deleted*, any data stored in the container, including the data stored in the profile's storage, is permanently lost.
+    To ensure the data is not lost, it should be persisted on a volume that is mounted to the container.
+    Refer to the section on :ref:`persisting data ` for more details.
+
+
+.. _installation:docker:restarting-container:
+
+Restarting a container
+======================
+
+After shutting down a container, it can be restarted with:
+
+.. code-block:: console
+
+    docker start -i aiida
+
+The name ``aiida`` here is the reference given with the ``--name`` option when the container was originally created.
+To open an interactive bash shell inside the container, run:
+
+.. code-block:: console
+
+    docker exec -it aiida bash
+
+
+.. _installation:docker:persisting-data:
+
+Persisting data
+===============
+
+The preferred way to persistently store data across Docker containers is to `create a volume `__.
+To create a simple volume, run:
+
+.. code-block:: console
+
+    docker volume create container-home-data
+
+In this case, one needs to specifically mount the volume the very first time that the container is created:
+
+.. code-block:: console
+
+    docker run -it --name aiida -v container-home-data:/home/aiida aiidateam/aiida-core-with-services:latest bash
+
+By mounting the volume, any data that gets stored in the ``/home/aiida`` path within the container is stored in the ``container-home-data`` volume and therefore persists even if the container is deleted.
+
+When installing packages with pip, use the ``--user`` flag so that the installed Python packages are stored permanently in the mounted volume (assuming the home directory is mounted to a volume as described above).
+The packages will be installed in the ``/home/aiida/.local`` directory of the container, which is mounted on the ``container-home-data`` volume.
+
+You can also mount a folder in the container to a local directory; please refer to the `Docker documentation `__ for more information.
+
+
+.. _installation:docker:container-configuration:
+
+Container configuration
+=======================
+
+Upon container creation, the following environment variables can be set to configure the default profile that is created:
+
+* ``AIIDA_PROFILE_NAME``: the name of the profile to be created (default: ``default``)
+* ``AIIDA_USER_EMAIL``: the email of the default user to be created (default: ``aiida@localhost``)
+* ``AIIDA_USER_FIRST_NAME``: the first name of the default user to be created (default: ``Giuseppe``)
+* ``AIIDA_USER_LAST_NAME``: the last name of the default user to be created (default: ``Verdi``)
+* ``AIIDA_USER_INSTITUTION``: the institution of the default user to be created (default: ``Khedivial``)
+
+These environment variables can be set when starting the container with the ``-e`` option.
+
+.. note::
+
+    The ``AIIDA_CONFIG_FILE`` variable points to a path inside the container.
+    Therefore, if you want to use a custom configuration file, it needs to be mounted from the host path to the container path.
+
+.. _installation:docker:container-backup:
+
+Container backup
+================
+
+To back up the AiiDA data, you can follow the instructions in the `Backup and restore `__ section.
+However, Docker provides a convenient way to back up the container data by taking a snapshot of the entire container or the mounted volume(s).
+
+The following is adapted from the `Docker documentation `__.
+If you don't have a volume mounted to the container, you can back up the whole container by committing the container to an image:
+
+.. code-block:: console
+
+    docker container commit aiida aiida-container-backup
+
+The above command creates a new image named ``aiida-container-backup`` from the container ``aiida``, containing all the data and modifications made in the container.
+The image can then be exported to a tarball so that it can be stored permanently:
+
+.. code-block:: console
+
+    docker save -o aiida-container-backup.tar aiida-container-backup
+
+To restore the backup, pull the image, or load it from the tarball:
+
+.. code-block:: console
+
+    docker load -i aiida-container-backup.tar
+
+This restores the image, from which a new container can then be created with ``docker run``.
+
+Any `named volumes `__ can be backed up independently.
+Refer to `Backup, restore, or migrate data volumes `__ for more information.
diff --git a/docs/source/installation/guide_complete.rst b/docs/source/installation/guide_complete.rst
new file mode 100644
index 0000000000..49136bbba7
--- /dev/null
+++ b/docs/source/installation/guide_complete.rst
@@ -0,0 +1,408 @@
+.. 
_installation:guide-complete:
+
+===========================
+Complete installation guide
+===========================
+
+The :ref:`quick installation guide ` is designed to make the setup as simple and portable as possible.
+However, the resulting setup has some :ref:`limitations ` concerning the available functionality and performance.
+This guide provides detailed information and instructions to set up a feature-complete and performant installation.
+
+Setting up a working installation of AiiDA involves the following steps:
+
+#. :ref:`Install the Python Package `
+#. :ref:`(Optional) RabbitMQ `
+#. :ref:`Create a profile `
+
+
+.. _installation:guide-complete:python-package:
+
+Install Python Package
+======================
+
+.. important::
+    AiiDA requires a recent version of Python.
+    Please refer to the `Python Package Index (PyPI) `_ for the minimum required version.
+
+To install AiiDA, the ``aiida-core`` Python package needs to be installed, which can be done in a number of ways:
+
+
+.. tab-set::
+
+    .. tab-item:: pip
+
+        Installing ``aiida-core`` from PyPI.
+
+        #. Install `pip `_.
+        #. Install ``aiida-core``:
+
+        .. code-block:: console
+
+            pip install aiida-core
+
+    .. tab-item:: conda
+
+        Installing ``aiida-core`` using Conda.
+
+        #. Install `conda `_.
+
+        #. Create an environment and install ``aiida-core``:
+
+        .. code-block:: console
+
+            conda create -n aiida -c conda-forge aiida-core
+
+        .. tip::
+            As of conda v23.10, the `dependency solver `_ has been significantly improved.
+            If you are experiencing long installation times, you may want to consider updating conda.
+
+    .. tab-item:: source
+
+        Installing ``aiida-core`` directly from source.
+
+        #. Install `git `_.
+        #. Clone the repository from GitHub:
+
+        .. code-block:: console
+
+            git clone https://github.com/aiidateam/aiida-core
+
+        #. Install `pip `_.
+        #. Install ``aiida-core``:
+
+        .. code-block:: console
+
+            cd aiida-core
+            pip install -e .
+
+        The ``-e`` flag installs the package in editable mode, which is recommended for development.
+        Any changes made to the source files are automatically picked up by the installation.
+
+
+.. _installation:guide-complete:python-package:optional-requirements:
+
+Optional requirements
+---------------------
+
+The ``aiida-core`` Python package defines a number of optional requirements, subdivided into the following categories:
+
+* ``atomic_tools`` : Requirements to deal with atomic data and structures
+* ``docs`` : Requirements to build the documentation
+* ``notebook`` : Requirements to run AiiDA in Jupyter notebooks
+* ``pre-commit`` : Requirements to automatically format and lint source code for development
+* ``rest`` : Requirements to run the REST API
+* ``ssh_kerberos`` : Requirements for enabling SSH authentication through Kerberos
+* ``tests`` : Requirements to run the test suite
+* ``tui`` : Requirements to provide a textual user interface (TUI)
+
+These optional requirements can be installed using pip by adding them as a comma-separated list, for example:
+
+.. code-block:: console
+
+    pip install aiida-core[atomic_tools,docs]
+
+
+.. _installation:guide-complete:rabbitmq:
+
+RabbitMQ
+========
+
+`RabbitMQ `_ is an optional but recommended service for AiiDA.
+It is a message broker that is required to run AiiDA's daemon.
+The daemon is a system process running in the background that manages one or multiple daemon workers that can run AiiDA processes.
+This way, the daemon helps AiiDA to scale, as many processes can be run in parallel on the daemon workers instead of blocking a single Python interpreter.
+To facilitate communication with the daemon workers, RabbitMQ is required.
+
+Although it is possible to run AiiDA without a daemon, the daemon provides significant benefits, and it is therefore recommended to install RabbitMQ.
+
+.. tab-set::
+
+    .. tab-item:: conda
+
+        #. Install `conda `_.
+
+        #. Create an environment and install ``aiida-core.services``:
+
+        .. code-block:: console
+
+            conda create -n aiida -c conda-forge aiida-core.services
+
+        .. important::
+
+            The ``aiida-core.services`` package ensures that RabbitMQ is installed in the conda environment.
+            However, it is not a *service*, in the sense that it is not automatically started, but has to be started manually.
+
+            .. code-block:: console
+
+                rabbitmq-server -detached
+
+            Note that this has to be done each time after the machine has been rebooted.
+            The server can be stopped with:
+
+            .. code-block:: console
+
+                rabbitmqctl stop
+
+
+    .. tab-item:: Ubuntu
+
+        #. Install RabbitMQ through the ``apt`` package manager:
+
+        .. code-block:: console
+
+            sudo apt install rabbitmq-server
+
+        This installs startup scripts such that the server is started automatically when the machine boots.
+
+
+    .. tab-item:: MacOS X
+
+        #. Install `Homebrew `_.
+
+        #. Install RabbitMQ:
+
+        .. code-block:: console
+
+            brew install rabbitmq
+            brew services start rabbitmq
+
+        .. important::
+
+            The service has to be started manually each time the machine reboots.
+
+    .. tab-item:: Other
+
+        For all other cases, please refer to the `official documentation `_ of RabbitMQ.
+
+
+
+.. _installation:guide-complete:create-profile:
+
+Create a profile
+================
+
+After the ``aiida-core`` package is installed, a profile needs to be created.
+A profile defines where the data generated by AiiDA is to be stored.
+The data storage can be customized through plugins, and so the required configuration depends on the selected storage plugin.
+
+To create a new profile, run:
+
+.. code-block:: console
+
+    verdi profile setup <storage_plugin>
+
+where ``<storage_plugin>`` is the entry point name of the storage plugin selected for the profile.
+To list the available storage plugins, run:
+
+.. code-block:: console
+
+    verdi plugin list aiida.storage
+
+AiiDA ships with a number of storage plugins, and it is recommended to select one of the following:
+
+.. grid:: 1 2 2 2
+    :gutter: 3
+
+    .. grid-item-card:: :fa:`feather;mr-1` ``core.sqlite_dos``
+        :text-align: center
+        :shadow: md
+
+        Use this to explore AiiDA in use cases where performance is not critical.
+
+        This storage plugin does not require any services, making it easy to install and use.
+
+        +++++++++++++++++++++++++++++++++++++++++++++
+
+        .. button-ref:: installation:guide-complete:create-profile:core-sqlite-dos
+            :click-parent:
+            :expand:
+            :color: primary
+            :outline:
+
+            Create a ``core.sqlite_dos`` profile
+
+    .. grid-item-card:: :fa:`bolt;mr-1` ``core.psql_dos``
+        :text-align: center
+        :shadow: md
+
+        Use this for production work where database performance is important.
+
+        This storage plugin uses PostgreSQL for the database and provides the greatest performance.
+
+        +++++++++++++++++++++++++++++++++++++++++++++
+
+        .. button-ref:: installation:guide-complete:create-profile:core-psql-dos
+            :click-parent:
+            :expand:
+            :color: primary
+            :outline:
+
+            Create a ``core.psql_dos`` profile
+
+
+.. 
seealso::
+
+    See the :ref:`topic on storage ` for a more detailed overview of the storage plugins provided by ``aiida-core``, with their strengths and weaknesses.
+
+Other packages may provide additional storage plugins, which are also installable through ``verdi profile setup``.
+
+
+.. _installation:guide-complete:create-profile:common-options:
+
+Common options
+--------------
+
+The exact options available for the ``verdi profile setup`` command depend on the selected storage plugin, but there are a number of common options and shared functionality:
+
+* ``--profile``: The name of the profile.
+* ``--set-as-default``: Whether the new profile should be defined as the new default.
+* ``--email``: Email for the default user that is created.
+* ``--first-name``: First name for the default user that is created.
+* ``--last-name``: Last name for the default user that is created.
+* ``--institution``: Institution for the default user that is created.
+* ``--use-rabbitmq/--no-use-rabbitmq``: Whether to configure the RabbitMQ broker.
+  Required to enable the daemon and the submission of processes to it.
+  The default is ``--use-rabbitmq``, in which case the command tries to connect to RabbitMQ running on the localhost with default connection parameters.
+  If this fails, a warning is issued and the profile is configured without a broker.
+  Once the profile is created, RabbitMQ can still be enabled through ``verdi profile configure-rabbitmq``, which allows customizing the connection parameters.
+* ``--non-interactive``: By default, the command prompts to specify a value for all options.
+  Alternatively, the ``--non-interactive`` flag can be specified, in which case the command never prompts and the options need to be specified directly on the command line.
+  This is useful when ``verdi profile setup`` is used in non-interactive environments, such as scripts.
+* ``--config``: Instead of passing all options on the command line, the values can be defined in a YAML file, whose filepath is passed through this option.
+
+
+.. _installation:guide-complete:create-profile:core-sqlite-dos:
+
+``core.sqlite_dos``
+-------------------
+
+This storage plugin uses `SQLite `_ and the `disk-objectstore `_ to store data.
+The ``disk-objectstore`` is a Python package that is automatically installed as a dependency when installing ``aiida-core``, which was covered in the :ref:`Python package installation section `.
+The installation instructions for SQLite depend on your system; please visit the `SQLite website `_ for details.
+
+Once the prerequisites are met, create a profile with:
+
+.. code-block:: console
+
+    verdi profile setup core.sqlite_dos
+
+The options specific to the ``core.sqlite_dos`` storage plugin are:
+
+* ``--filepath``: Filepath of the directory in which to store data for this backend.
+
+
+.. _installation:guide-complete:create-profile:core-psql-dos:
+
+``core.psql_dos``
+-----------------
+
+This storage plugin uses `PostgreSQL `_ and the `disk-objectstore `_ to store data.
+The ``disk-objectstore`` is a Python package that is automatically installed as a dependency when installing ``aiida-core``, which was covered in the :ref:`Python package installation section `.
+The storage plugin can connect to a PostgreSQL instance running on the localhost or on a server that can be reached over the internet.
+Instructions for installing PostgreSQL are beyond the scope of this guide.
+
+.. tip::
+
+    The creation of the PostgreSQL user and database as explained below is implemented in an automated way in the ``verdi presto`` command.
+    Instead of performing the steps below and running ``verdi profile setup core.psql_dos`` manually, it is possible to run:
+
+    .. code-block::
+
+        verdi presto --use-postgres
+
+Before creating a profile, a database (and optionally a custom database user) has to be created.
+First, connect to PostgreSQL using ``psql``, the `native command line client for PostgreSQL `_:
+
+.. code-block:: console
+
+    psql -h <HOSTNAME> -U <USERNAME> -W
+
+If PostgreSQL is installed on the localhost, ``<HOSTNAME>`` can be replaced with ``localhost``, and the default ``<USERNAME>`` is ``postgres``.
+While it is possible to use the default ``postgres`` user for the AiiDA profile, it is recommended to create a custom user:
+
+.. code-block:: sql
+
+    CREATE USER aiida_user WITH PASSWORD '<PASSWORD>';
+
+replacing ``<PASSWORD>`` with a secure password.
+The name ``aiida_user`` is just an example and can be customized; note that a hyphenated name such as ``aiida-user`` would have to be double-quoted to be a valid PostgreSQL identifier.
+Note the selected username and password, as they are needed when creating the profile later on.
+
+After the user has been created, create a database:
+
+.. code-block:: sql
+
+    CREATE DATABASE aiida_database OWNER aiida_user ENCODING 'UTF8' LC_COLLATE='en_US.UTF-8' LC_CTYPE='en_US.UTF-8';
+
+Again, the selected database name ``aiida_database`` is purely an example and can be customized.
+Make sure that the ``OWNER`` is set to the user that was created in the previous step.
+Next, grant all privileges on this database to the user:
+
+.. code-block:: sql
+
+    GRANT ALL PRIVILEGES ON DATABASE aiida_database to aiida_user;
+
+After the database has been created, the interactive ``psql`` shell can be closed.
+To test if the database was created successfully, run the following command:
+
+.. code-block:: console
+
+    psql -h <HOSTNAME> -d <DATABASE> -U <USERNAME> -W
+
+replacing ``<DATABASE>`` and ``<USERNAME>`` with the chosen names for the database and user in the previous steps, and providing the chosen password when prompted.
+
+Once the database has been created, create a profile with:
+
+.. code-block:: console
+
+    verdi profile setup core.psql_dos
+
+The options specific to the ``core.psql_dos`` storage plugin are:
+
+* ``--database-engine``: The engine to use to connect to the database.
+* ``--database-hostname``: The hostname of the PostgreSQL server.
+* ``--database-port``: The port of the PostgreSQL server.
+* ``--database-username``: The username with which to connect to the PostgreSQL server.
+* ``--database-password``: The password with which to connect to the PostgreSQL server.
+* ``--database-name``: The name of the database in the PostgreSQL server.
+* ``--repository-uri``: URI to the file repository.
+
+.. _installation:guide-complete:validate-installation:
+
+
+Validate installation
+=====================
+
+Once a profile has been created, validate that everything is correctly set up with:
+
+.. code-block:: console
+
+    verdi status
+
+The output should look something like the following::
+
+    ✔ version: AiiDA v2.5.1
+    ✔ config: /path/.aiida
+    ✔ profile: profile-name
+    ✔ storage: SqliteDosStorage[/path/.aiida/repository/profile-name]: open,
+    ✔ broker: RabbitMQ v3.8.2 @ amqp://guest:guest@127.0.0.1:5672?heartbeat=600
+    ⏺ daemon: The daemon is not running.
+
+If no lines show red crosses, AiiDA has been correctly installed and is ready to go.
+When a new profile is created, the daemon will not yet be running, but it can be started using:
+
+.. code-block:: console
+
+    verdi daemon start
+
+.. 
note:: + + The storage information depends on the storage plugin that was selected. + The broker may be shown as not having been configured which occurs for profiles created with the :ref:`quick installation method `. + This is fine, however, :ref:`some functionality is not supported ` for broker-less profiles. + + +.. admonition:: Not all green? + :class: warning + + If the status reports any problems, please refer to the :ref:`troubleshooting section `. diff --git a/docs/source/installation/guide_quick.rst b/docs/source/installation/guide_quick.rst new file mode 100644 index 0000000000..af9aaa8dc0 --- /dev/null +++ b/docs/source/installation/guide_quick.rst @@ -0,0 +1,85 @@ +.. _installation:guide-quick: + +======================== +Quick installation guide +======================== + +First, install the ``aiida-core`` Python package: + +.. code-block:: console + + pip install aiida-core + +.. attention:: + + AiiDA requires a recent version of Python. + Please refer to the `Python Package Index `_ for the minimum required version. + +Next, set up a profile where all data is stored: + +.. code-block:: console + + verdi presto + +Verify that the installation was successful: + +.. code-block:: console + + verdi status + +If none of the lines show a red cross, indicating a problem, the installation was successful and you are good to go. + +.. admonition:: What next? + :class: hint + + If you are a new user, we recommend to start with the :ref:`basic tutorial `. + Alternatively, check out the :ref:`next steps guide `. + +.. admonition:: Problems during installation? + :class: warning + + If you encountered any issues, please refer to the :ref:`troubleshooting section `. + +.. warning:: + + Not all AiiDA functionality is supported by the quick installation. + Please refer to the :ref:`section below ` for more information. + + +.. _installation:guide-quick:limitations: + +Quick install limitations +========================= + +Functionality +------------- + +Part of AiiDA's functionality requires a `message broker `_, with the default implementation using `RabbitMQ `_. +The message broker is used to allow communication with the :ref:`daemon `. +Since RabbitMQ is a separate service and is not always trivial to install, the quick installation guide sets up a profile that does not require it. +As a result, the daemon cannot be started and processes cannot be submitted to it but can only be run locally. + +.. note:: + The ``verdi presto`` command automatically checks if RabbitMQ is running on the localhost. + If it can successfully connect, it configures the profile with the message broker and therefore the daemon functionality will be available. + +.. tip:: + The connection parameters of RabbitMQ can be (re)configured after the profile is set up with ``verdi profile configure-rabbitmq``. + This can be useful when the RabbitMQ setup is different from the default that AiiDA checks for and the automatic configuration of ``verdi presto`` failed. + + +Performance +----------- + +The quick installation guide by default creates a profile that uses `SQLite `_ for the database. +Since SQLite does not require running a service, it is easy to install and use on essentially any system. +However, for certain use cases it is not going to be the most performant solution. +AiiDA also supports `PostgreSQL `_ which is often going to be more performant compared to SQLite. + +.. 
tip:: + If a PostgreSQL service is available, run ``verdi presto --use-postgres`` to set up a profile that uses PostgreSQL instead of SQLite. + The command tries to connect to the service and automatically create a user account and database to use for the new profile. + AiiDA provides defaults that work for most setups where PostgreSQL is installed on the localhost. + Should this fail, the connection parameters can be customized using the ``--postgres-hostname``, ``--postgres-port``, ``--postgres-username``, ``--postgres-password`` options. + +Please refer to the :ref:`complete installation guide ` for instructions to set up a feature-complete and performant installation. diff --git a/docs/source/installation/index.rst b/docs/source/installation/index.rst new file mode 100644 index 0000000000..38e350ab7a --- /dev/null +++ b/docs/source/installation/index.rst @@ -0,0 +1,97 @@ +.. _installation: + +.. toctree:: + :maxdepth: 2 + :hidden: + + guide_quick + guide_complete + docker + troubleshooting + +============ +Installation +============ + +.. grid:: 1 2 2 2 + :gutter: 3 + + .. grid-item-card:: :fa:`rocket;mr-1` Quick install + :text-align: center + :shadow: md + + Install AiiDA in the most simple way that should work on most systems. + Choose this method if you are new to AiiDA and simply want to try it out. + + +++++++++++++++++++++++++++++++++++++++++++++ + + .. button-ref:: guide_quick + :ref-type: doc + :click-parent: + :expand: + :color: primary + :outline: + + To the quick installation guide + + .. grid-item-card:: :fa:`info-circle;mr-1` Complete installation guide + :text-align: center + :shadow: md + + Install AiiDA with full control over the configuration. + Choose this method if you are an advanced user or you want to optimize the setup for your system. + + +++++++++++++++++++++++++++++++++++++++++++++ + + .. button-ref:: guide_complete + :ref-type: doc + :click-parent: + :expand: + :color: primary + :outline: + + To the complete installation guide + + +Preinstalled environments +========================= + +Instead of installing AiiDA manually, there are also solutions that provide an environment with AiiDA and its requirements installed and pre-configured: + +.. grid:: 1 2 2 2 + :gutter: 3 + + .. grid-item-card:: :fa:`cube;mr-1` Docker + :text-align: center + :shadow: md + + AiiDA provides a number of Docker containers that come with the batteries included. + This is a great option if you are already familiar with Docker. + + +++++++++++++++++++++++++++++++++++++++++++++ + + .. button-ref:: docker + :ref-type: doc + :click-parent: + :expand: + :color: primary + :outline: + + To the Docker installation guide + + .. grid-item-card:: :fa:`cloud;mr-1` Virtual machine + :text-align: center + :shadow: md + + Quantum Mobile is a Virtual Machine for computational materials science. + It comes with AiiDA installed as well as several materials simulation codes. + + +++++++++++++++++++++++++++++++++++++++++++++ + + .. button-link:: https://quantum-mobile.readthedocs.io/en/latest/ + :click-parent: + :expand: + :color: primary + :outline: + + To the Quantum Mobile website diff --git a/docs/source/intro/troubleshooting.rst b/docs/source/installation/troubleshooting.rst similarity index 97% rename from docs/source/intro/troubleshooting.rst rename to docs/source/installation/troubleshooting.rst index b90232fc2a..e70b82a17b 100644 --- a/docs/source/intro/troubleshooting.rst +++ b/docs/source/installation/troubleshooting.rst @@ -1,4 +1,4 @@ -.. _intro:troubleshooting: +.. 
_installation:troubleshooting:
 
 ***************
 Troubleshooting
 ***************
@@ -21,12 +21,12 @@ In the example output, all services have a green check mark and so should be running.
 If all services are up and running and you are still experiencing problems or if you have trouble with the installation of aiida-core and related services, consider the commonly encountered problems below.
 In case you are still experiencing problems, you can request support by opening a post on the `Discourse server `_.
 
-.. _intro:troubleshooting:installation:
+.. _installation:troubleshooting:installation:
 
 Installation issues
 -------------------
 
-.. _intro:troubleshooting:installation:rabbitmq:
+.. _installation:troubleshooting:installation:rabbitmq:
 
 RabbitMQ incompatibility
 ........................
@@ -136,16 +136,16 @@ A way to do it is to add a line similar to the following to the ``~/.bashrc`` and
 
 .. _Stackoverflow link: http://stackoverflow.com/questions/21079820/how-to-find-pg-config-pathlink
 
-.. _intro:troubleshooting:installation:postgresql-autodetect-issues:
+.. _installation:troubleshooting:installation:postgresql-autodetect-issues:
 
 Autodetection of the PostgreSQL setup
 .....................................
 
-Sometimes AiiDA fails to autodetect the local configuration of PostgreSQL when running ``verdi quicksetup``.
+Sometimes AiiDA fails to autodetect the local configuration of PostgreSQL when running ``verdi presto --use-postgres``.
 In that case try to:
 
-    1. Create the database manually in PostgreSQL (see :ref:`here`).
-    2. Then run the full ``verdi setup`` command (see :ref:`here`).
+    1. Create the database manually in PostgreSQL (see :ref:`here`).
+    2. Then set up the profile with ``verdi profile setup core.psql_dos``.
 
 
 RabbitMQ Installation (Unix)
diff --git a/docs/source/intro/cheatsheet.rst b/docs/source/intro/cheatsheet.rst
deleted file mode 100644
index f91c7cc698..0000000000
--- a/docs/source/intro/cheatsheet.rst
+++ /dev/null
@@ -1,16 +0,0 @@
-.. _intro:cheatsheet:
-
-=====================
-The AiiDA cheat sheet
-=====================
-
-The AiiDA cheat sheet gives a broad overview of the most commonly used `verdi` commands, the inheritance hierarchy of the main AiiDA classes, their attributes and methods, as well as a showcase of the `QueryBuilder`.
-
-When clicking on the embedded image, the pdf version will be opened in the browser. Where applicable, text elements contain hyperlinks to the relevant sections of the documentation.
-
-The file can also be :download:`downloaded <_cheatsheet/cheatsheet_v.pdf>` in two-page layout for printing.
-
-Happy exploring!
-
-.. image:: ./_cheatsheet/cheatsheet.png
-    :target: ../_static/cheatsheet_h.pdf
diff --git a/docs/source/intro/get_started.rst b/docs/source/intro/get_started.rst
deleted file mode 100644
index 52d1bb1e89..0000000000
--- a/docs/source/intro/get_started.rst
+++ /dev/null
@@ -1,98 +0,0 @@
-.. _intro:get_started:
-
-****************
-Getting started
-****************
-
-An AiiDA installation consists of three core components (plus any external codes you wish to run):
-
-* |aiida-core|: The main Python package and the associated ``verdi`` command line interface
-* |PostgreSQL|: The service that manages the database that AiiDA uses to store data.
-* |RabbitMQ|: The message broker used for communication within AiiDA.
-
-.. toctree::
-    :maxdepth: 1
-    :hidden:
-
-    install_system
-    install_conda
-    run_docker
-    installation
-
-.. _intro:install:setup:
-.. _intro:get_started:setup:
-
-Setup
-=====
-
-There are multiple routes to setting up a working AiiDA environment.
-Which of those is optimal depends on your environment and use case. -If you are unsure, use the :ref:`system-wide installation ` method. - -.. grid:: 1 2 2 2 - :gutter: 3 - - .. grid-item-card:: :fa:`desktop;mr-1` System-wide installation - - .. button-ref:: intro:get_started:system-wide-install - :ref-type: ref - :click-parent: - :class: btn-link - - Install all software directly on your workstation or laptop. - - Install the prerequisite services using standard package managers (apt, homebrew, etc.) with administrative privileges. - - .. grid-item-card:: :fa:`folder;mr-1` Installation into Conda environment - - .. button-ref:: intro:get_started:conda-install - :ref-type: ref - :click-parent: - :class: btn-link - - Install all software into an isolated conda environment. - - This method does not require administrative privileges, but involves manual management of start-up and shut-down of services. - - .. grid-item-card:: :fa:`cube;mr-1` Run via docker container - - .. button-ref:: intro:get_started:docker - :ref-type: ref - :click-parent: - :class: btn-link - - Run AiiDA and prerequisite services as a single docker container. - - Does not require the separate installation of prerequisite services. - Especially well-suited to get directly started on the **tutorials**. - - .. grid-item-card:: :fa:`cloud;mr-1` Run via virtual machine - - .. button-link:: https://quantum-mobile.readthedocs.io/ - :click-parent: - :class: btn-link - - Use a virtual machine with all the required software pre-installed. - - `Materials Cloud `__ provides both downloadable and web based VMs, - also incorporating pre-installed Materials Science codes. - -.. _intro:get_started:next: - -What's next? -============ - -After successfully completing one of the above outlined setup routes, if you are new to AiiDA, we recommed you go through the :ref:`Basic Tutorial `, -or see our :ref:`Next steps guide `. - -If however, you encountered some issues, proceed to the :ref:`troubleshooting section `. - -.. admonition:: In-depth instructions - :class: seealso title-icon-read-more - - For more detailed instructions on configuring AiiDA, :ref:`see the configuration how-to `. - -.. |aiida-core| replace:: `aiida-core `__ -.. |PostgreSQL| replace:: `PostgreSQL `__ -.. |RabbitMQ| replace:: `RabbitMQ `__ -.. |Homebrew| replace:: `Homebrew `__ diff --git a/docs/source/intro/index.rst b/docs/source/intro/index.rst index 779def1ae1..eb62171c21 100644 --- a/docs/source/intro/index.rst +++ b/docs/source/intro/index.rst @@ -1,15 +1,13 @@ +.. _intro: + +.. toctree:: + :maxdepth: 1 + :hidden: + ============ Introduction ============ -.. _intro:about: - - -************* -What is AiiDA -************* - - AiiDA is an open-source Python infrastructure to help researchers with automating, managing, persisting, sharing and reproducing the complex workflows associated with modern computational science and all associated data. AiiDA is built to support and streamline the four core pillars of the ADES model: Automation, Data, Environment, and Sharing (described `here `__). Some of the key features of AiiDA include: @@ -24,12 +22,3 @@ AiiDA is built to support and streamline the four core pillars of the ADES model * **Open source:** AiiDA is released under the `MIT open-source license `__. See also the `list of AiiDA-powered scientific publications `__ and `testimonials from AiiDA users `__. - - -.. 
toctree:: - :maxdepth: 1 - - get_started - ../tutorials/index - cheatsheet - troubleshooting diff --git a/docs/source/intro/install_conda.rst b/docs/source/intro/install_conda.rst deleted file mode 100644 index 1f8a3bfd82..0000000000 --- a/docs/source/intro/install_conda.rst +++ /dev/null @@ -1,163 +0,0 @@ -.. _intro:get_started:conda-install: - -*********************************** -Installation into Conda environment -*********************************** - -This installation route installs all necessary software -- including the prerequisite services PostgreSQL and RabbitMQ -- into a Conda environment. -This is the recommended method for users on shared systems and systems where the user has no administrative privileges. -If you want to install AiiDA onto you own personal workstation/laptop, it is recommanded to use the :ref:`system-wide installation `. - -.. important:: - - This installation method installs **all** software into a conda environment, including PostgreSQL and RabbitMQ. - See the :ref:`system-wide installation ` to use Conda only to install the AiiDA (core) Python package. - -.. grid:: 1 - :gutter: 3 - - .. grid-item-card:: Install prerequisite services + AiiDA (core) - - *Install the aiida-core package and all required services in a Conda environment.* - - #. We strongly recommend using ``mamba`` instead of the default ``conda`` (or environment resolution may time out). - Consider using `Mambaforge `_ when starting from scratch, or ``conda install -c conda-forge mamba``. - - #. Open a terminal and execute: - - .. code-block:: console - - $ mamba create -n aiida -c conda-forge aiida-core aiida-core.services - $ mamba activate aiida - - .. grid-item-card:: Start-up services and initialize data storage - - Before working with AiiDA, you must first initialize a database storage area on disk. - - .. code-block:: console - - (aiida) $ initdb -D mylocal_db - - This *database cluster* (located inside a folder named ``mylocal_db``) may contain a collection of databases (one per profile) that is managed by a single running server process. - We start this process with: - - .. code-block:: console - - (aiida) $ pg_ctl -D mylocal_db -l logfile start - - .. tip:: - - The default port ``5432`` may already be in use by another process. - In this case, you can pass the ``-o "-F -p "`` option to the ``pg_ctl`` command, ```` being the desired port number. - Then for the ``psql`` command, you can pass the ``-p `` option. - - .. admonition:: Further Reading - :class: seealso title-icon-read-more - - - `Creating a Database Cluster `__. - - `Starting the Database Server `__. - - - - Then, start the RabbitMQ server: - - .. code-block:: console - - (aiida) $ rabbitmq-server -detached - - .. important:: - - The services started this way will use the default ports on the machine. - Conflicts may happen if there are more than one user running AiiDA this way on the same machine, or you already have the server running in a system-wide installation. - To get around this issue, you can explicitly define the ports to be used. - - .. grid-item-card:: Setup profile - - Next, set up an AiiDA configuration profile and related data storage, with the ``verdi quicksetup`` command. - - .. code-block:: console - - (aiida) $ verdi quicksetup - Info: enter "?" for help - Info: enter "!" to ignore the default and set no value - Profile name: me - Email Address (for sharing data): me@user.com - First name: my - Last name: name - Institution: where-i-work - - .. 
tip:: - - In case of non-default ports are used for the *database cluster* and the RabbitMQ server, you can pass them using ``--db-port`` and ``--broker-port`` options respectively. - - - .. admonition:: Is AiiDA unable to auto-detect the PostgreSQL setup? - :class: attention title-icon-troubleshoot - - If you get an error saying that AiiDA has trouble autodetecting the PostgreSQL setup, you will need to do the manual setup explained in the :ref:`troubleshooting section`. - - Once the profile is up and running, you can start the AiiDA daemon(s): - - .. code-block:: console - - (aiida) $ verdi daemon start 2 - - .. important:: - - The verdi daemon(s) must be restarted after a system reboot. - - .. tip:: - - Do not start more daemons then there are physical processors on your system. - - .. grid-item-card:: Check setup - - To check that everything is set up correctly, execute: - - .. code-block:: console - - (aiida) $ verdi status - ✓ version: AiiDA v2.0.0 - ✓ config: /path/to/.aiida - ✓ profile: default - ✓ storage: Storage for 'default' @ postgresql://username:***@localhost:5432/db_name / file:///path/to/repository - ✓ rabbitmq: Connected as amqp://127.0.0.1?heartbeat=600 - ✓ daemon: Daemon is running as PID 2809 since 2019-03-15 16:27:52 - - .. admonition:: Missing a checkmark or ecountered some other issue? - :class: attention title-icon-troubleshoot - - :ref:`See the troubleshooting section `. - - .. button-ref:: intro:get_started:next - :ref-type: ref - :expand: - :color: primary - :outline: - :class: sd-font-weight-bold - - What's next? - - .. grid-item-card:: Shut-down services - - After finishing with your aiida session, particularly if switching between profiles, you may wish to power down the daemon and the services: - - .. code-block:: console - - (aiida) $ verdi daemon stop - (aiida) $ pg_ctl -D mylocal_db stop - (aiida) $ rabbitmqctl stop - - .. grid-item-card:: Restart the services - - If you want to restart the services and the daemon: - - .. code-block:: console - - (aiida) $ pg_ctl -D mylocal_db start - (aiida) $ rabbitmq-server -detached - (aiida) $ verdi daemon start - - .. tip:: - - If different ports are used, you have to pass them here as well. diff --git a/docs/source/intro/install_system.rst b/docs/source/intro/install_system.rst deleted file mode 100644 index 48464cc981..0000000000 --- a/docs/source/intro/install_system.rst +++ /dev/null @@ -1,298 +0,0 @@ -.. _intro:get_started:system-wide-install: - -************************ -System-wide installation -************************ - -The system-wide installation will install the prerequisite services (PostgreSQL and RabbitMQ) via standard package managers such that their startup and shut-down is largely managed by the operating system. -The AiiDA (core) Python package is then installed either with Conda or pip. - -.. warning:: RabbitMQ v3.5 and below are EOL and not supported at all. For versions RabbitMQ v3.8.15 and up, AiiDA is not compatible with default server configurations. For details refer to the :ref:`dedicated troubleshooting section`. - -This is the *recommended* installation method to setup AiiDA on a personal laptop or workstation for the majority of users. - -.. grid:: 1 - :gutter: 3 - - .. grid-item-card:: Install prerequisite services - - AiiDA is designed to run on `Unix `_ operating systems and requires a `bash `_ or `zsh `_ shell, and Python >= 3.7. - - .. tab-set:: - - .. tab-item:: Ubuntu - - *AiiDA is tested on Ubuntu versions 16.04, 18.04, and 20.04.* - - Open a terminal and execute: - - .. 
code-block:: console - - $ sudo apt install git python3-dev python3-pip postgresql postgresql-server-dev-all postgresql-client rabbitmq-server - - .. tab-item:: MacOS X (Homebrew) - - The recommended installation method for Mac OS X is to use `Homebrew `__. - - #. Follow `this guide `__ to install Homebrew on your system if not installed yet. - - #. Open a terminal and execute: - - .. code-block:: console - - $ brew install postgresql rabbitmq git python - $ brew services start postgresql - $ brew services start rabbitmq - - .. tab-item:: Windows Subsystem for Linux - - *The following instructions are for setting up AiiDA on WSL 1/2 in combination with Ubuntu.* - - #. Installing RabbitMQ: - - * (WSL 1) Install and start the `Windows native RabbitMQ `_. - - * (WSL 2) Install RabbitMQ inside the the WSL: - - .. code-block:: console - - $ sudo apt install rabbitmq-server - - then start the ``rabbitmq`` server: - - .. code-block:: console - - $ sudo service rabbitmq-server start - - #. Install Python and PostgreSQL: - - .. code-block:: console - - $ sudo apt install postgresql postgresql-server-dev-all postgresql-client git python3-dev python-pip - - then start the PostgreSQL server: - - .. code-block:: console - - $ sudo service postgresql start - - .. dropdown:: How to setup WSL to automatically start services after system boot. - - Create a file ``start_aiida_services.sh`` containing the following lines: - - .. code-block:: console - - $ service postgresql start - $ service rabbitmq-server start # Only for WSL 2! - - and store it in your preferred location, e.g., the home directory. - Then make the file executable, and editable only by root users with: - - .. code-block:: console - - $ chmod a+x,go-w /path/to/start_aiida_services.sh - $ sudo chown root:root /path/to/start_aiida_services.sh - - Next, run - - .. code-block:: console - - $ sudo visudo - - and add the line - - .. code-block:: sh - - ALL=(root) NOPASSWD: /path/to/start_aiida_services.sh - - replacing ```` with your Ubuntu username. - This will allow you to run *only* this specific ``.sh`` file with ``root`` access (without password), without lowering security on the rest of your system. - - Now you can use the Windows Task Scheduler to automatically execute this file on startup: - - #. Open Task Scheduler. - - #. In the "Actions" menu, click "Create Task". - - #. In "General/Security options", select "Run whether user is logged on or not". - - #. In the "Triggers" tab, click "New...". - - #. In the "Begin the task:" dropdown, select "At startup". - - #. Click "OK" to confirm. - - #. In the "Actions" tab, click "New...". - - #. In the "Action" dropdown, select "Start a program". - - #. In the "Program/script" text field, add ``C:\Windows\System32\bash.exe``. - - #. In the "Add arguments (optional)" text field, add ``-c "sudo /path/to/start_aiida_services.sh"``. - - #. Click "OK" to confirm. - - #. Click "OK" to confirm the task. - - You can tweak other details of this task to fit your needs. - - .. tab-item:: Other - - #. Install RabbitMQ following the `instructions applicable to your system `__. - #. Install PostgreSQL following the `instructions applicable to your system `__. - - .. tip:: - - Alternatively use the :ref:`pure conda installation method `. - - .. grid-item-card:: Install AiiDA (core) - - .. tab-set:: - - .. tab-item:: pip + venv - - *Install the aiida-core package from PyPI into a virtual environment.* - - Open a terminal and execute: - - .. 
code-block:: console - - $ python -m venv ~/envs/aiida - $ source ~/envs/aiida/bin/activate - (aiida) $ pip install aiida-core - - .. important:: - - Make sure the ``python`` executable is for a Python version that is supported by AiiDA. - You can see the version using: - - .. code-block:: console - - $ python --version - - You can find the supported Python versions for the latest version of AiiDA `on the PyPI page `__. - - .. tip:: - - See the `venv documentation `__ if the activation command fails. - The exact command for activating a virtual environment differs slightly based on the used shell. - - .. dropdown:: :fa:`plus-circle` Installation extras - - There are additional optional packages that you may want to install, which are grouped in the following categories: - - * ``atomic_tools``: packages that allow importing and manipulating crystal structure from various formats - * ``ssh_kerberos``: adds support for ssh transport authentication through Kerberos - * ``REST``: allows a REST server to be ran locally to serve AiiDA data - * ``docs``: tools to build the documentation - * ``notebook``: jupyter notebook - to allow it to import AiiDA modules - * ``tests``: python modules required to run the automatic unit tests - * ``pre-commit``: pre-commit tools required for developers to enable automatic code linting and formatting - - In order to install any of these package groups, simply append them as a comma separated list in the ``pip`` install command, for example: - - .. code-block:: console - - (aiida) $ pip install aiida-core[atomic_tools,docs] - - .. dropdown:: :fa:`wrench` Kerberos on Ubuntu - - If you are installing the optional ``ssh_kerberos`` and you are on Ubuntu you might encounter an error related to the ``gss`` package. - To fix this you need to install the ``libffi-dev`` and ``libkrb5-dev`` packages: - - .. code-block:: console - - $ sudo apt-get install libffi-dev libkrb5-dev - - .. tab-item:: Conda - - *Install the aiida-core package in a Conda environment.* - - #. Make sure that conda is installed, e.g., by following `the instructions on installing Miniconda `__. - - #. Open a terminal and execute: - - .. code-block:: console - - $ conda create -yn aiida -c conda-forge aiida-core - $ conda activate aiida - - .. tab-item:: From source - - *Install the aiida-core package directly from the cloned repository.* - - Open a terminal and execute: - - .. code-block:: console - - $ git clone https://github.com/aiidateam/aiida-core.git - $ cd aiida-core/ - $ python -m venv ~/envs/aiida - $ source ~/envs/aiida/bin/activate - (aiida) $ pip install . - - .. grid-item-card:: Setup profile - - Next, set up an AiiDA configuration profile and related data storage, with the ``verdi quicksetup`` command. - - .. code-block:: console - - (aiida) $ verdi quicksetup - Info: enter "?" for help - Info: enter "!" to ignore the default and set no value - Profile name: me - Email Address (for sharing data): me@user.com - First name: my - Last name: name - Institution: where-i-work - - .. admonition:: Is AiiDA unable to auto-detect the PostgreSQL setup? - :class: attention title-icon-troubleshoot - - If you get an error saying that AiiDA has trouble autodetecting the PostgreSQL setup, you will need to do the manual setup explained in the :ref:`troubleshooting section`. - - .. grid-item-card:: Start verdi daemons - - Start the verdi daemon(s) that are used to run AiiDA workflows. - - .. code-block:: console - - (aiida) $ verdi daemon start 2 - - .. 
important:: - - The verdi daemon(s) must be restarted after a system reboot. - - .. tip:: - - Do not start more daemons then there are physical processors on your system. - - .. grid-item-card:: Check setup - - To check that everything is set up correctly, execute: - - .. code-block:: console - - (aiida) $ verdi status - ✓ version: AiiDA v2.0.0 - ✓ config: /path/to/.aiida - ✓ profile: default - ✓ storage: Storage for 'default' @ postgresql://username:***@localhost:5432/db_name / file:///path/to/repository - ✓ rabbitmq: Connected as amqp://127.0.0.1?heartbeat=600 - ✓ daemon: Daemon is running as PID 2809 since 2019-03-15 16:27:52 - - At this point you should now have a working AiiDA environment, from which you can add and retrieve data. - - .. admonition:: Missing a checkmark or encountered some other issue? - :class: attention title-icon-troubleshoot - - :ref:`See the troubleshooting section `. - - .. button-ref:: intro:get_started:next - :ref-type: ref - :expand: - :color: primary - :outline: - :class: sd-font-weight-bold - - What's next? diff --git a/docs/source/intro/installation.rst b/docs/source/intro/installation.rst deleted file mode 100644 index 479ba53666..0000000000 --- a/docs/source/intro/installation.rst +++ /dev/null @@ -1,295 +0,0 @@ -.. _intro:install: -.. _intro:advanced-config: - -********************** -Advanced configuration -********************** - -This chapter covers topics that go beyond the :ref:`standard setup of AiiDA `. -If you are new to AiiDA, we recommend you first go through the :ref:`Basic Tutorial `, -or see our :ref:`Next steps guide `. - -.. _intro:install:database: - -Creating the database ---------------------- - -AiiDA uses a database to store the nodes, node attributes and other information, allowing the end user to perform fast queries of the results. -Currently, the highly performant `PostgreSQL`_ database is supported as a database backend. - -.. _PostgreSQL: https://www.postgresql.org/downloads - -.. admonition:: Find out more about the database - :class: seealso title-icon-read-more - - - `Creating a Database Cluster `__. - - `Starting the Database Server `__. - - :ref:`The database topic `. - -To manually create the database for AiiDA, you need to run the program ``psql`` to interact with postgres. -On most operating systems, you need to do so as the ``postgres`` user that was created upon installing the software. -To assume the role of ``postgres`` run as root: - -.. code-block:: console - - $ su - postgres - -(or, equivalently, type ``sudo su - postgres``, depending on your distribution) and launch the postgres program: - -.. code-block:: console - - $ psql - -.. tip:: - - If you have installed PostgreSQL through Conda and you see an error like ``psql: FATAL: role "" does not exist`` or ``psql: FATAL: database "" does not exist``, the default role and database apparently do no exist. - The command ``psql -l`` prints the list of existing databases and the associated roles. - You can try connecting to one of those by using the ``-d`` and ``-U`` option to specify the database and role, respectively, for example, ``psql -d template0 -U some-role``. - -Create a new database user account for AiiDA by running: - -.. code-block:: sql - - CREATE USER aiida WITH PASSWORD ''; - -replacing ```` with a password of your choice. - -You will need to provide the password again when you configure AiiDA to use this database through ``verdi setup``. -If you want to change the password you just created use the command: - -.. 
code-block:: sql - - ALTER USER aiida PASSWORD ''; - -Next, we create the database itself. We enforce the UTF-8 encoding and specific locales: - -.. code-block:: sql - - CREATE DATABASE aiidadb OWNER aiida ENCODING 'UTF8' LC_COLLATE='en_US.UTF-8' LC_CTYPE='en_US.UTF-8' TEMPLATE=template0; - -and grant all privileges on this DB to the previously-created ``aiida`` user: - -.. code-block:: sql - - GRANT ALL PRIVILEGES ON DATABASE aiidadb to aiida; - -You have now created a database for AiiDA and you can close the postgres shell by typing ``\q``. -To test if the database was created successfully, you can run the following command as a regular user in a bash terminal: - -.. code-block:: console - - $ psql -h localhost -d aiidadb -U aiida -W - -and type the password you inserted before, when prompted. -If everything worked well, you should get no error and see the prompt of the ``psql`` shell. - -If you use the same names as in the example commands above, then during the ``verdi setup`` phase the following parameters will apply to the newly created database: - -.. code-block:: console - - $ Database engine: postgresql_psycopg2 - $ Database host: localhost - $ Database port: 5432 - $ AiiDA Database name: aiidadb - $ AiiDA Database user: aiida - $ AiiDA Database password: - -.. admonition:: Don't forget to backup your database! - :class: tip title-icon-tip - - See the :ref:`Database backup how-to `), and :ref:`how to move your database `. - -Database setup using 'peer' authentication ------------------------------------------- - -On Ubuntu Linux, the default PostgreSQL setup is configured to use ``peer`` authentication, which allows password-less login via local Unix sockets. -In this mode, PostgreSQL compares the Unix user connecting to the socket with its own database of users and allows a connection if a matching user exists. - -.. note:: - - This is an alternative route to set up your database - the standard approach will work on Ubuntu just as well. - -Below we are going to take advantage of the command-line utilities shipped on Ubuntu to simplify creating users and databases compared to issuing the SQL commands directly. - -Assume the role of ``postgres``: - -.. code-block:: console - - $ sudo su postgres - -Create a database user with the **same name** as the UNIX user who will be running AiiDA (usually your login name): - -.. code-block:: console - - $ createuser - -replacing ```` with your username. - -Next, create the database itself with your user as the owner: - -.. code-block:: console - - $ createdb -O aiidadb - -Exit the shell to go back to your login user. -To test if the database was created successfully, try: - -.. code-block:: console - - $ psql aiidadb - -During the ``verdi setup`` phase, use ``!`` to leave host empty and specify your Unix user name as the *AiiDA Database user*.: - -.. code-block:: console - - $ Database engine: postgresql_psycopg2 - $ Database host: ! - $ Database port: 5432 - $ AiiDA Database name: aiidadb - $ AiiDA Database user: - $ AiiDA Database password: "" - - -RabbitMQ configuration ----------------------- - -In most normal setups, RabbitMQ will be installed and run as a service on the same machine that hosts AiiDA itself. -In that case, using the default configuration proposed during a profile setup will work just fine. -However, when the installation of RabbitMQ is not standard, for example it runs on a different port, or even runs on a completely different machine, all relevant connection details can be configured with ``verdi setup``. 
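As an illustration (only a sketch, with placeholder profile name, credentials, hostname, and port), a broker running on a separate host with SSL enabled could be configured along these lines; the table below describes each option in detail, and any option not given on the command line is prompted for interactively:

.. code-block:: console

    $ verdi setup --profile my-profile \
        --broker-protocol amqps \
        --broker-username aiida \
        --broker-password my-secret \
        --broker-host rmq.example.com \
        --broker-port 5671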
- -The following parameters can be configured: - -+--------------+---------------------------+---------------+-------------------------------------------------------------------------------------------------------------------------+ -| Parameter | Option | Default | Explanation | -+==============+===========================+===============+=========================================================================================================================+ -| Protocol | ``--broker-protocol`` | ``amqp`` | The protocol to use, can be either ``amqp`` or ``amqps`` for SSL enabled connections. | -+--------------+---------------------------+---------------+-------------------------------------------------------------------------------------------------------------------------+ -| Username | ``--broker-username`` | ``guest`` | The username with which to connect. The ``guest`` account is available and usable with a default RabbitMQ installation. | -+--------------+---------------------------+---------------+-------------------------------------------------------------------------------------------------------------------------+ -| Password | ``--broker-password`` | ``guest`` | The password with which to connect. The ``guest`` account is available and usable with a default RabbitMQ installation. | -+--------------+---------------------------+---------------+-------------------------------------------------------------------------------------------------------------------------+ -| Host | ``--broker-host`` | ``127.0.0.1`` | The hostname of the RabbitMQ server. | -+--------------+---------------------------+---------------+-------------------------------------------------------------------------------------------------------------------------+ -| Port | ``--broker-port`` | ``5672`` | The port to which the server listens. | -+--------------+---------------------------+---------------+-------------------------------------------------------------------------------------------------------------------------+ -| Virtual host | ``--broker-virtual-host`` | ``''`` | Optional virtual host. Should not contain the leading forward slash, this will be added automatically by AiiDA. | -+--------------+---------------------------+---------------+-------------------------------------------------------------------------------------------------------------------------+ -| Parameters | not available | n.a. | These are additional broker parameters that are typically encoded as URL parameters, for example, to specify SSL | -| | | | parameters such as the filepath to the certificate that is to be used. The parameters are currently not definable | -| | | | through the CLI but have to be added manually in the ``config.json``. A key ``broker_parameters`` should be added that | -| | | | is a dictionary, which can contain fields: ``cafile``, ``capath``, ``cadata``, ``certfile``, ``keyfile`` and | -| | | | ``no_verify_ssl``. | -+--------------+---------------------------+---------------+-------------------------------------------------------------------------------------------------------------------------+ - - -.. _intro:install:verdi_setup: - -verdi setup ------------ - -After the database has been created, do: - -.. code-block:: console - - $ verdi setup --profile - -where `` is a profile name of your choosing. -The ``verdi setup`` command will guide you through the setup process through a series of prompts. - -The first information asked is your email, which will be used to associate the calculations to you. 
-In AiiDA, the email is your username, and acts as a unique identifier when importing/exporting data from AiiDA. - -.. note:: - - The password, in the current version of AiiDA, is not used (it will be used only in the REST API and in the web interface). - If you leave the field empty, no password will be set and no access will be granted to the user via the REST API and the web interface. - -Then, the following prompts will help you configure the database. Typical settings are: - -.. code-block:: console - - $ Default user email: richard.wagner@leipzig.de - $ Database engine: postgresql_psycopg2 - $ PostgreSQL host: localhost - $ PostgreSQL port: 5432 - $ AiiDA Database name: aiida_dev - $ AiiDA Database user: aiida - $ AiiDA Database password: - $ AiiDA repository directory: /home/wagner/.aiida/repository/ - [...] - Configuring a new user with email 'richard.wagner@leipzig.de' - $ First name: Richard - $ Last name: Wagner - $ Institution: BRUHL, LEIPZIG - $ The user has no password, do you want to set one? [y/N] y - $ Insert the new password: - $ Insert the new password (again): - -.. admonition:: Don't forget to backup your data! - :class: tip title-icon-tip - - See the :ref:`installation backup how-to `. - -.. _intro:install:start_daemon: - -Managing the daemon -------------------- - -The AiiDA daemon process runs in the background and takes care of processing your submitted calculations and workflows, checking their status, retrieving their results once they are finished and storing them in the AiiDA database. - -The AiiDA daemon is controlled using three simple commands: - -* ``verdi daemon start``: start the daemon -* ``verdi daemon status``: check the status of the daemon -* ``verdi daemon stop``: stop the daemon - -.. note:: - - While operational, the daemon logs its activity to a file in ``~/.aiida/daemon/log/`` (or, more generally, ``$AIIDA_PATH/.aiida/daemon/log``). - Get the latest log messages via ``verdi daemon logshow``. - -.. _intro:install:jupyter: - -Using AiiDA in Jupyter ----------------------- - - 1. Install the AiiDA ``notebook`` extra **inside** the AiiDA python environment, e.g. by running ``pip install aiida-core[notebook]``. - - -With this setup, you're ready to use AiiDA in Jupyter notebooks. - -Start a Jupyter notebook server: - -.. code-block:: console - - $ jupyter notebook - -This will open a tab in your browser. Click on ``New -> Python``. - -To load the `aiida` magics extension, simply run: - -.. code-block:: ipython - - %load_ext aiida - -Now you can load a profile (the default unless specified) by: - -.. code-block:: ipython - - %aiida - -After executing the cell by ``Shift-Enter``, you should receive the message "Loaded AiiDA DB environment." -Otherwise, you can load the profile manually as you would in a Python script: - -.. code-block:: python - - from aiida import load_profile, orm - load_profile() - qb = orm.QueryBuilder() - # ... - -You can also run `verdi` CLI commands, using the currently loaded profile, by: - -.. code-block:: ipython - - %verdi status diff --git a/docs/source/intro/run_docker.rst b/docs/source/intro/run_docker.rst deleted file mode 100644 index 227dd4b263..0000000000 --- a/docs/source/intro/run_docker.rst +++ /dev/null @@ -1,236 +0,0 @@ -.. _intro:get_started:docker: -.. _intro:install:docker: - -**************************** -Run AiiDA via a Docker image -**************************** - -The AiiDA team maintains a `Docker `__ image on `Docker Hub `__. 
-This image contains a fully pre-configured AiiDA environment which makes it particularly useful for learning and developing purposes. - -.. caution:: - - All data stored in a container will persist only over the lifetime of that particular container (i.e., removing the container will also purge the data) unless you use volumes to persist the data, see :ref:`Advanced usage ` for more details. - -.. grid:: 1 - :gutter: 3 - - .. grid-item-card:: Install Docker on your PC - - Docker is available for Windows, Mac and Linux and can be installed in different ways. - - .. tab-set:: - - .. tab-item:: Colima on MacOS - - `Colima `_ is a new open-source project that makes it easy to run Docker on MacOS. - It is a lightweight alternative to Docker Engine with a focus on simplicity and performance. - - Colima is the recommended way. - With colima, you can have multiple Docker environments running at the same time, each with its own Docker daemon and resource allocation thus avoiding conflicts. - - To install the colima, on MacOS run: - - .. code-block:: console - - $ brew install colima - - Or check Check `here `__ for other installation options. - - After installation, start the docker daemon by: - - .. code-block:: console - - $ colima start - - .. tab-item:: Docker CE on Linux - - The bare minimum to run Docker on Linux is to install the `Docker Engine `_. - If you don't need a graphical user interface, this is the recommended way to install Docker. - - .. note:: - - You will need `root` privileges to perform the `post-installation steps `_. - Otherwise, you will need to use `sudo` for every Docker command. - - - - .. grid-item-card:: Start/stop container and use AiiDA interactively - - Start the image with the `docker command line interface (docker CLI) `_. - - There are differnt tags available for the AiiDA image, the ``latest`` tag is the image with the most recent stable version of ``aiida-core`` installed in the container. - You can replace the ``latest`` tag with the ``aiida-core`` or services version you want to use, check the `Docker Hub `_ for available tags. - - .. tab-set:: - - .. tab-item:: Docker CLI - - Use the Docker CLI to run the AiiDA container. - - .. code-block:: console - - $ docker run -it --name aiida-container-demo aiidateam/aiida-core-with-services:latest bash - - The ``-it`` option is used to run the container in interactive mode and to allocate a pseudo-TTY. - You will be dropped into a bash shell inside the container. - - You can specify a name for the container with the ``--name`` option for easier reference later on. - For the quick test, you can also use the ``--rm`` option to remove the container when it exits. - In the following examples, we will use the name ``aiida-container-demo`` for the container. - - To exit and stop the container, type ``exit`` or press ``Ctrl+D``. - - Please note the ``run`` sub-command is used to both create and start a container. - In order to start a container which is already created, you should use ``start``, by running: - - .. code-block:: console - - $ docker start -i aiida-container-demo - - If you need another shell inside the container, run: - - .. code-block:: console - - $ docker exec -it aiida-container-demo bash - - By default, an AiiDA profile is automatically set up inside the container. - To disable this default profile being created, set the ``SETUP_DEFAULT_AIIDA_PROFILE`` environment variable to ``false``. 
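For instance, the default profile can be customized by passing the variables listed below with the ``-e`` option when the container is first created; the profile name and email here are purely illustrative:

.. code-block:: console

    $ docker run -it --name aiida-container-demo \
        -e AIIDA_PROFILE_NAME=demo \
        -e AIIDA_USER_EMAIL=jane@example.com \
        aiidateam/aiida-core-with-services:latest bash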
- - The following environment variables can be set to configure the default AiiDA profile: - - * ``AIIDA_PROFILE_NAME``: the name of the profile to be created (default: ``default``) - * ``AIIDA_USER_EMAIL``: the email of the default user to be created (default: ``aiida@localhost``) - * ``AIIDA_USER_FIRST_NAME``: the first name of the default user to be created (default: ``Giuseppe``) - * ``AIIDA_USER_LAST_NAME``: the last name of the default user to be created (default: ``Verdi``) - * ``AIIDA_USER_INSTITUTION``: the institution of the default user to be created (default: ``Khedivial``) - * ``AIIDA_CONFIG_FILE``: the path to the AiiDA configuration file used for other profile configuration parameters (default: ``/aiida/assets/config-quick-setup.yaml``). - - These environment variables can be set when starting the container with the ``-e`` option. - - Please note that the ``AIIDA_CONFIG_FILE`` variable points to a path inside the container. - Therefore, if you want to use a custom configuration file, it needs to be mounted from the host path to the container path. - - .. grid-item-card:: Check setup - - The profile named ``default`` is created under the ``aiida`` user. - - To check the status of AiiDA environment setup, execute the following command inside the container shell: - - .. code-block:: console - - $ verdi status - ✓ config dir: /home/aiida/.aiida - ✓ profile: On profile default - ✓ repository: /home/aiida/.aiida/repository/default - ✓ postgres: Connected as aiida_qs_aiida_477d3dfc78a2042156110cb00ae3618f@localhost:5432 - ✓ rabbitmq: Connected as amqp://127.0.0.1?heartbeat=600 - ✓ daemon: Daemon is running as PID 1795 since 2020-05-20 02:54:00 - - -Advanced usage -============== - -.. _intro:install:docker:advanced_usage: - -Congratulations! You have a working AiiDA environment, and can start using it. - -If you use the Docker image for development or production, you will likely need additional settings such as clone the repository and install `aiida-core` in the editable mode to make it work as expected. -See `development wiki `_ for more details. - -.. dropdown:: Copy files from your computer to the container - - .. tab-set:: - - .. tab-item:: Docker CLI - - Use the ``docker cp`` command if you need to copy files from your computer to the container or vice versa. - - For example, to copy a file named ``test.txt`` from your current working directory to the ``/home/aiida`` path in the container, run: - - .. code-block:: console - - $ docker cp test.txt aiida-container-demo:/home/aiida - - -.. dropdown:: Persist data across different containers - - The lifetime of the data stored in a container is limited to the lifetime of that particular container. - - If you stop the container (``docker stop`` or simply ``Ctrl+D`` from the container) and start it again, any data you created will persist. - However, if you remove the container, **all data will be removed as well**. - - .. code-block:: console - - $ docker rm aiida-container-demo - - The preferred way to persistently store data across Docker containers is to `create a volume `__. - - .. tab-set:: - - .. tab-item:: Docker CLI - - To create a simple volume, run: - - .. code-block:: console - - $ docker volume create container-home-data - - In this case, one needs to specifically mount the volume very first time that the container is being created: - - .. 
code-block:: console - - $ docker run -it --name aiida-container-demo -v container-home-data:/home/aiida aiidateam/aiida-core-with-services:latest bash - - Starting the container with the above command ensures that any data stored in the ``/home/aiida`` path within the container is stored in the ``container-home-data`` volume and therefore persists even if the container is removed. - - When installing packages with pip, use the ``--user`` flag to store the Python packages installed in the mounted volume (if you mount the home specifically to a volume as mentioned above) permanently. - The packages will be installed in the ``/home/aiida/.local`` directory of the container, which is mounted on the ``container-home-data`` volume. - - You can also mount a folder in container to a local directory, please refer to the `Docker documentation `__ for more information. - -.. dropdown:: Backup the container - - To backup the data of AiiDA, you can follow the instructions in the `Backup and restore `__ section. - However, Docker provides a convenient way to backup the container data by taking a snapshot of the entire container or the mounted volume(s). - - The following is adapted from the `Docker documentation `__. - - If you don't have a volume mounted to the container, you can backup the whole container by committing the container to an image: - - .. code-block:: console - - $ docker container commit aiida-container-demo aiida-container-backup - - The above command will create a new image named ``aiida-container-backup`` containing all the data and modifications you made in the container. - - Then, you can export the container to a local tarball and store it permanently: - - .. code-block:: console - - $ docker save -o aiida-container-backup.tar aiida-container-backup - - To restore the container, pull the image, or load from the tarball, run: - - .. code-block:: console - - $ docker load -i aiida-container-backup.tar - - You'll find a container in the list and you can then start it with ``docker start``. - - If you used a `named volume `__, you can backup the volume independently. - - .. tab-set:: - - .. tab-item:: Docker CLI - - Please check `Backup, restore, or migrate data volumes `__ for more information. - -.. button-ref:: intro:get_started:next - :ref-type: ref - :expand: - :color: primary - :outline: - :class: sd-font-weight-bold - - What's next? 
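As a compact recap of the backup workflow described above, using the same container and image names as in the examples, the full round trip is the following; after the final step, a container can be started again from the restored image:

.. code-block:: console

    $ docker container commit aiida-container-demo aiida-container-backup
    $ docker save -o aiida-container-backup.tar aiida-container-backup
    $ docker load -i aiida-container-backup.tar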
diff --git a/docs/source/redirects.txt b/docs/source/redirects.txt index 855d62ec73..50f8ed2029 100644 --- a/docs/source/redirects.txt +++ b/docs/source/redirects.txt @@ -1,18 +1,18 @@ developer_guide/core/transport.rst topics/transport.rst developer_guide/core/extend_restapi.rst internals/rest_api.rst -get_started/index.rst intro/get_started.rst +get_started/index.rst installation/index.rst get_started/computers.rst howto/run_codes.rst get_started/codes.rst howto/run_codes.rst howto/plugins.rst howto/plugins_develop.rst howto/exploring.rst howto/query.rst import_export/main.rst internals/storage/sqlite_zip.rst internals/data_storage.rst internals/storage/sqlite_zip.rst -install/quick_installation.rst intro/get_started.rst -install/prerequisites.rst intro/get_started.rst -install/installation.rst intro/get_started.rst +install/quick_installation.rst installation/index.rst +install/prerequisites.rst installation/index.rst +install/installation.rst installation/index.rst install/configuration.rst howto/installation.rst install/updating_installation.rst howto/installation.rst -install/troubleshooting.rst intro/troubleshooting.rst +install/troubleshooting.rst installation/troubleshooting.rst restapi/index.rst reference/rest_api.rst verdi/verdi_user_guide.rst topics/cli.rst working_with_aiida/index.rst howto/index.rst diff --git a/docs/source/reference/cheatsheet.rst b/docs/source/reference/cheatsheet.rst new file mode 100644 index 0000000000..7f636c882e --- /dev/null +++ b/docs/source/reference/cheatsheet.rst @@ -0,0 +1,16 @@ +.. _reference:cheatsheet: + +================= +AiiDA cheat sheet +================= + +The AiiDA cheat sheet gives a broad overview of the most commonly used ``verdi`` commands, the inheritance hierarchy of the main AiiDA classes, their attributes and methods, as well as a showcase of the ``QueryBuilder``. + +When clicking on the embedded image, the pdf version will be opened in the browser. Where applicable, text elements contain hyperlinks to the relevant sections of the documentation. + +The file can also be :download:`downloaded ` in two-page layout for printing. + +Happy exploring! + +.. image:: ./cheatsheet/cheatsheet.png + :target: ../_static/cheatsheet_h.pdf diff --git a/docs/source/intro/_cheatsheet/cheatsheet.png b/docs/source/reference/cheatsheet/cheatsheet.png similarity index 100% rename from docs/source/intro/_cheatsheet/cheatsheet.png rename to docs/source/reference/cheatsheet/cheatsheet.png diff --git a/docs/source/intro/_cheatsheet/cheatsheet.svg b/docs/source/reference/cheatsheet/cheatsheet.svg similarity index 100% rename from docs/source/intro/_cheatsheet/cheatsheet.svg rename to docs/source/reference/cheatsheet/cheatsheet.svg diff --git a/docs/source/intro/_cheatsheet/cheatsheet_v.pdf b/docs/source/reference/cheatsheet/cheatsheet_v.pdf similarity index 100% rename from docs/source/intro/_cheatsheet/cheatsheet_v.pdf rename to docs/source/reference/cheatsheet/cheatsheet_v.pdf diff --git a/docs/source/reference/index.rst b/docs/source/reference/index.rst index f0814e41bc..8553b84363 100644 --- a/docs/source/reference/index.rst +++ b/docs/source/reference/index.rst @@ -8,4 +8,5 @@ Reference command_line api/index rest_api + cheatsheet _changelog.md diff --git a/docs/source/topics/storage.rst b/docs/source/topics/storage.rst index 59d6761360..0d3b68d656 100644 --- a/docs/source/topics/storage.rst +++ b/docs/source/topics/storage.rst @@ -122,9 +122,9 @@ The command requires the PostgreSQL database to already exist and to be able to .. 
tip:: - Try the ``verdi quicksetup`` command to have the PostgreSQL database automatically created. + Try the ``verdi presto --use-postgres`` command to have the PostgreSQL database automatically created. Certain systems require root access to do so, causing the command to fail if it cannot obtain root access. - In this case, the database should be created manually (see :ref:`intro:install:database` for details). + In this case, the database should be created manually (see :ref:`installation:guide-complete:create-profile:core-psql-dos` for details). Once created, a profile can be created using the database with the command ``verdi profile setup core.psql_dos``. diff --git a/docs/source/tutorials/basic.md b/docs/source/tutorials/basic.md index 05bede4d50..f446cf2169 100644 --- a/docs/source/tutorials/basic.md +++ b/docs/source/tutorials/basic.md @@ -31,7 +31,7 @@ At the end of this tutorial, you will know how to: :::{important} If you are working on your own machine, note that the tutorial assumes that you have a working AiiDA installation and have set up your AiiDA profile in the current Python environment. -If this is not the case, consult the {ref}`getting started page`. +If this is not the case, consult the {ref}`getting started page`. ::: :::{tip} From 737da386b8564bc78e2f4a7fd9e2ae312accf03c Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Fri, 28 Jun 2024 23:43:57 +0200 Subject: [PATCH 17/42] Daemon: Fix `DbLogHandler` not being configured (#6491) Processes run through the daemon would no longer have their log messages attached to the database. This would result in `verdi process report` returning nothing. The problem is that the `start_worker` function would call `configure_logging` without setting `with_orm=True` and so the `DbLogHandler` would essentially be undone. An integration test is added as a regression test. --- src/aiida/engine/daemon/worker.py | 2 +- src/aiida/workflows/arithmetic/multiply_add.py | 2 +- tests/engine/daemon/test_worker.py | 16 ++++++++++++++++ tests/tools/dumping/test_processes.py | 2 +- 4 files changed, 19 insertions(+), 3 deletions(-) diff --git a/src/aiida/engine/daemon/worker.py b/src/aiida/engine/daemon/worker.py index 172155f078..913e44d9b7 100644 --- a/src/aiida/engine/daemon/worker.py +++ b/src/aiida/engine/daemon/worker.py @@ -44,7 +44,7 @@ def start_daemon_worker(foreground: bool = False) -> None: write to the daemon log file. 
""" daemon_client = get_daemon_client() - configure_logging(daemon=not foreground, daemon_log_file=daemon_client.daemon_log_file) + configure_logging(with_orm=True, daemon=not foreground, daemon_log_file=daemon_client.daemon_log_file) LOGGER.debug(f'sys.executable: {sys.executable}') LOGGER.debug(f'sys.path: {sys.path}') diff --git a/src/aiida/workflows/arithmetic/multiply_add.py b/src/aiida/workflows/arithmetic/multiply_add.py index c4f2e1eeda..b1d29b9aae 100644 --- a/src/aiida/workflows/arithmetic/multiply_add.py +++ b/src/aiida/workflows/arithmetic/multiply_add.py @@ -49,7 +49,7 @@ def add(self): """Add two numbers using the `ArithmeticAddCalculation` calculation job plugin.""" inputs = {'x': self.ctx.product, 'y': self.inputs.z, 'code': self.inputs.code} future = self.submit(ArithmeticAddCalculation, **inputs) - + self.report(f'Submitted the `ArithmeticAddCalculation`: {future}') return ToContext(addition=future) def validate_result(self): diff --git a/tests/engine/daemon/test_worker.py b/tests/engine/daemon/test_worker.py index 6b923e403a..f8807fccab 100644 --- a/tests/engine/daemon/test_worker.py +++ b/tests/engine/daemon/test_worker.py @@ -10,6 +10,8 @@ import pytest from aiida.engine.daemon.worker import shutdown_worker +from aiida.orm import Log +from aiida.workflows.arithmetic.multiply_add import MultiplyAddWorkChain @pytest.mark.requires_rmq @@ -24,3 +26,17 @@ async def test_shutdown_worker(manager): finally: # Reset the runner of the manager, because once closed it cannot be reused by other tests. manager._runner = None + + +@pytest.mark.usefixtures('aiida_profile_clean', 'started_daemon_client') +def test_logging_configuration(aiida_code_installed, submit_and_await): + """Integration test to verify that the daemon has the logging properly configured including the ``DbLogHandler``. + + This is a regression test to make sure that the ``DbLogHandler`` is properly configured for daemon workers, which + ensures that log messages are written to the log table in the database for the corresponding node. + """ + code = aiida_code_installed('add') + node = submit_and_await(MultiplyAddWorkChain, x=1, y=2, z=3, code=code) + logs = Log.collection.get_logs_for(node) + assert len(logs) == 1 + assert 'Submitted the `ArithmeticAddCalculation`' in next(log.message for log in logs) diff --git a/tests/tools/dumping/test_processes.py b/tests/tools/dumping/test_processes.py index 371dcb80a9..aab1a48abb 100644 --- a/tests/tools/dumping/test_processes.py +++ b/tests/tools/dumping/test_processes.py @@ -465,4 +465,4 @@ def test_generate_parent_readme(tmp_path, generate_workchain_multiply_add): # Check for outputs of `verdi process status/report/show` assert 'Finished [0] [3:result]' in contents assert 'Property Value' in contents - assert 'No log messages' in contents + assert 'There are 1 log messages for this calculation' in contents From 04926fe20da15065f8f086f1ff3cb14cc163aa08 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Sat, 29 Jun 2024 09:20:33 +0200 Subject: [PATCH 18/42] Engine: Catch `NotImplementedError`in `get_process_state_change_timestamp` (#6489) The `get_process_state_change_timestamp` utility calls the method `get_global_variable` on the storage backend to get the timestamp of the latest process state change, which is typically stored in the `db_dbsetting` table. However, not all storage plugins implement, most notably the `core.sqlite_zip` plugin. 
Since this storage is read-only, the settings table is never used and requesting the timestamp of the last process state change does not make sense. Because this utility is used in `verdi process list`, the command would error since the `NotImplementedError` was not caught. The exception is now caught, and `verdi process list` will show "never" as the last state change.
---
 src/aiida/engine/utils.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/src/aiida/engine/utils.py b/src/aiida/engine/utils.py
index 44b8319ddc..888089dc64 100644
--- a/src/aiida/engine/utils.py
+++ b/src/aiida/engine/utils.py
@@ -316,9 +316,13 @@ def get_process_state_change_timestamp(process_type: Optional[str] = None) -> Op
     for process_type_key in process_types:
         key = PROCESS_STATE_CHANGE_KEY.format(process_type_key)
         try:
-            time_stamp = backend.get_global_variable(key)
-            if time_stamp is not None:
-                timestamps.append(datetime.fromisoformat(str(time_stamp)))
+            try:
+                timestamp = backend.get_global_variable(key)
+            except NotImplementedError:
+                pass
+            else:
+                if timestamp is not None:
+                    timestamps.append(datetime.fromisoformat(str(timestamp)))
         except KeyError:
             continue
 
From 1d104d06b95da36c71cab132c7b6fec52a005e18 Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber 
Date: Sat, 29 Jun 2024 22:00:24 +0200
Subject: [PATCH 19/42] ORM: Cache the logger adapter for `ProcessNode` (#6492)

The logger adapter was recreated each time the `logger` property of the `ProcessNode` was invoked. It is now created once in the `logger` property. The created logger adapter is assigned to the `_logger_adapter` attribute such that it can simply be returned at the next invocation.

The initialization of the adapter cannot be done in the constructor as that route is not taken if an existing node is loaded from the database.

Finally, the `logger` property only creates and returns the adapter when the node is stored. Otherwise it simply returns the base logger instance. This is because the logger adapter only works for stored nodes, and if it were instantiated at the point when the node is unstored, it would not be regenerated once the node is stored, and so the `DbLogHandler` would never be able to persist log messages to the database.
---
 src/aiida/orm/nodes/process/process.py | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/src/aiida/orm/nodes/process/process.py b/src/aiida/orm/nodes/process/process.py
index a1223a86fb..003aa231e4 100644
--- a/src/aiida/orm/nodes/process/process.py
+++ b/src/aiida/orm/nodes/process/process.py
@@ -251,7 +251,16 @@ def logger(self):
         """
         from aiida.orm.utils.log import create_logger_adapter
 
-        return create_logger_adapter(self._logger, self)
+        # If the node is not yet stored, there is no point in creating the logger adapter yet, as the ``DbLogHandler``
+        # it configures is only triggered for stored nodes; otherwise it cannot link the log message to the node.
+ if not self.pk: + return self._logger + + # First time the property is called after the node is stored, create the logger adapter + if not hasattr(self, '_logger_adapter'): + self._logger_adapter = create_logger_adapter(self._logger, self) + + return self._logger_adapter @classmethod def recursive_merge(cls, left: dict[Any, Any], right: dict[Any, Any]) -> None: From 310ff1db77bc75b6cadedf77394b96af05456f43 Mon Sep 17 00:00:00 2001 From: Ali Khosravi Date: Sat, 29 Jun 2024 22:02:52 +0200 Subject: [PATCH 20/42] Docs: Clarify `Transport.copy` requires `recursive=True` if source is a directory (#6495) --- src/aiida/transports/plugins/ssh.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/aiida/transports/plugins/ssh.py b/src/aiida/transports/plugins/ssh.py index 7035290a61..c62f17a67b 100644 --- a/src/aiida/transports/plugins/ssh.py +++ b/src/aiida/transports/plugins/ssh.py @@ -1118,11 +1118,13 @@ def copy(self, remotesource, remotedestination, dereference=False, recursive=Tru Flags used: ``-r``: recursive copy; ``-f``: force, makes the command non interactive; ``-L`` follows symbolic links - :param remotesource: file to copy from + :param remotesource: file to copy from :param remotedestination: file to copy to :param dereference: if True, copy content instead of copying the symlinks only Default = False. - :param recursive: if True copy directories recursively, otherwise only copy the specified file(s) + :param recursive: if True copy directories recursively. + Note that if the `remotesource` is a directory, `recursive` should always be set to True. + Default = True. :type recursive: bool :raise OSError: if the cp execution failed. From a44e6433d197244589c12c49031abfb442ec809f Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Sun, 30 Jun 2024 08:38:14 +0200 Subject: [PATCH 21/42] Tests: Fix `tests.orm.nodes.test_node:test_delete_through_backend` (#6496) The test was failing with a `core.sqlite_dos` storage plugin for the test profile. The problem is that the last assert was checking that the logs for `data_two` were deleted because `data_two` itself had been deleted. However, since it was deleted, the ORM instance can no longer be used either, which was causing an exception. Instead, its pk should be recorded before deleting the node, and the final check should just use the pk directly. It is not quite clear why this test was not failing for the default `core.psql_dos` storage plugin that is used for tests. It should not be backend specific since both use SQLAlchemy for the ORM. --- tests/orm/nodes/test_node.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/tests/orm/nodes/test_node.py b/tests/orm/nodes/test_node.py index 6b757d995a..ddcc586b84 100644 --- a/tests/orm/nodes/test_node.py +++ b/tests/orm/nodes/test_node.py @@ -837,17 +837,13 @@ def test_tab_completable_properties(self): class TestNodeDelete: """Tests for deleting nodes.""" - # TODO: Why is this failing for SQLite?? - # sqlalchemy.orm.exc.ObjectDeletedError: Instance '' has been deleted, - # or its row is otherwise not present. 
- # https://github.com/aiidateam/aiida-core/issues/6436 - @pytest.mark.requires_psql def test_delete_through_backend(self): """Test deletion works correctly through the backend.""" backend = get_manager().get_profile_storage() data_one = Data().store() data_two = Data().store() + data_two_pk = data_two.pk calculation = CalculationNode() calculation.base.links.add_incoming(data_one, LinkType.INPUT_CALC, 'input_one') calculation.base.links.add_incoming(data_two, LinkType.INPUT_CALC, 'input_two') @@ -866,7 +862,7 @@ def test_delete_through_backend(self): assert len(Log.collection.get_logs_for(data_one)) == 1 assert Log.collection.get_logs_for(data_one)[0].pk == log_one.pk - assert len(Log.collection.get_logs_for(data_two)) == 0 + assert len(Log.collection.find({'dbnode_id': data_two_pk})) == 0 def test_delete_collection_logs(self): """Test deletion works correctly through objects collection.""" From 24cfbe27e7408b78fca8e6f69799ebad3659400b Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Sun, 30 Jun 2024 08:39:28 +0200 Subject: [PATCH 22/42] `QueryBuilder`: Remove implementation for `has_key` in SQLite storage (#6497) The SQLite based storage plugins implemented the `has_key` operator for the `QueryBuilder`, however, the implementation is incorrect. At the very least the negation operator does not work. Since this can silently return incorrect query results, it is best to remove the implementation and raise an `NotImplementedError`. The same is done for `contains` which was not yet implemented but also didn't yet raise an explicit exception. --- src/aiida/storage/sqlite_zip/orm.py | 13 ++++--------- tests/cmdline/commands/test_calcjob.py | 12 +++++------- tests/orm/test_querybuilder.py | 1 + tests/storage/sqlite/test_orm.py | 5 +++-- tests/test_nodes.py | 1 + 5 files changed, 14 insertions(+), 18 deletions(-) diff --git a/src/aiida/storage/sqlite_zip/orm.py b/src/aiida/storage/sqlite_zip/orm.py index 494a733afb..ad0412f006 100644 --- a/src/aiida/storage/sqlite_zip/orm.py +++ b/src/aiida/storage/sqlite_zip/orm.py @@ -284,17 +284,12 @@ def _cast_json_type(comparator: JSON.Comparator, value: Any) -> Tuple[ColumnElem type_filter, casted_entity = _cast_json_type(database_entity, value) return case((type_filter, casted_entity.ilike(value, escape='\\')), else_=False) - # if operator == 'contains': - # to-do, see: https://github.com/sqlalchemy/sqlalchemy/discussions/7836 + if operator == 'contains': + # to-do, see: https://github.com/sqlalchemy/sqlalchemy/discussions/7836 + raise NotImplementedError('The operator `contains` is not implemented for SQLite-based storage plugins.') if operator == 'has_key': - return case( - ( - func.json_type(database_entity) == 'object', - func.json_each(database_entity).table_valued('key', joins_implicitly=True).c.key == value, - ), - else_=False, - ) + raise NotImplementedError('The operator `has_key` is not implemented for SQLite-based storage plugins.') if operator == 'in': type_filter, casted_entity = _cast_json_type(database_entity, value[0]) diff --git a/tests/cmdline/commands/test_calcjob.py b/tests/cmdline/commands/test_calcjob.py index 0b42ac0096..9fa6467d7f 100644 --- a/tests/cmdline/commands/test_calcjob.py +++ b/tests/cmdline/commands/test_calcjob.py @@ -241,7 +241,10 @@ def test_calcjob_outputcat(self): retrieved.base.repository._repository.put_object_from_filelike(io.BytesIO(b'5\n'), 'aiida.out') retrieved.base.repository._update_repository_metadata() - def test_calcjob_cleanworkdir_basic(self, pytestconfig): + # This currently fails with sqlite 
backend since the filtering relies on the `has_key` filter which is not + # implemented in SQLite, see https://github.com/aiidateam/aiida-core/pull/6497 + @pytest.mark.requires_psql + def test_calcjob_cleanworkdir_basic(self): """Test verdi calcjob cleanworkdir""" # Specifying no filtering options and no explicit calcjobs should exit with non-zero status options = [] @@ -261,17 +264,12 @@ def test_calcjob_cleanworkdir_basic(self, pytestconfig): # The flag should have been set assert self.result_job.outputs.remote_folder.base.extras.get('cleaned') is True - # TODO: This currently fails with sqlite backend, - # since the filtering relies on the `has_key` filter which is not implemented in SQLite. - # https://github.com/aiidateam/aiida-core/issues/6256 - marker_opt = pytestconfig.getoption('-m') - if 'not requires_psql' in marker_opt or 'presto' in marker_opt: - pytest.xfail('Known sqlite backend failure') # Do it again should fail as the calcjob has been cleaned options = ['-f', str(self.result_job.uuid)] result = self.cli_runner.invoke(command.calcjob_cleanworkdir, options) assert result.exception is not None, result.output + @pytest.mark.requires_psql def test_calcjob_cleanworkdir_advanced(self): # Check applying both p and o filters for flag_p in ['-p', '--past-days']: diff --git a/tests/orm/test_querybuilder.py b/tests/orm/test_querybuilder.py index e39f20a7b9..862474bc76 100644 --- a/tests/orm/test_querybuilder.py +++ b/tests/orm/test_querybuilder.py @@ -1537,6 +1537,7 @@ def test_iterall_with_store_group(self): for pk, pk_clone in zip(pks, [e[1] for e in sorted(pks_clone)]): assert orm.load_node(pk) == orm.load_node(pk_clone) + @pytest.mark.requires_psql @pytest.mark.usefixtures('aiida_profile_clean') def test_iterall_persistence(self, manager): """Test that mutations made during ``QueryBuilder.iterall`` context are automatically committed and persisted. 
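To illustrate the behavioral change: on a profile backed by one of the SQLite storage plugins, a `has_key` filter now fails loudly instead of silently returning potentially incorrect results. A minimal sketch, not part of the patch itself, with illustrative attribute and key names:

```python
from aiida import orm

qb = orm.QueryBuilder().append(
    orm.Dict,
    filters={'attributes.some_dict': {'has_key': 'some_key'}},  # 'some_dict' and 'some_key' are made-up names
)
qb.all()  # with this patch, executing the query raises NotImplementedError on SQLite-based storage plugins
```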
diff --git a/tests/storage/sqlite/test_orm.py b/tests/storage/sqlite/test_orm.py index 549c433511..21c75f1302 100644 --- a/tests/storage/sqlite/test_orm.py +++ b/tests/storage/sqlite/test_orm.py @@ -89,8 +89,9 @@ ({'attributes.integer': {'in': [5, 6, 7]}}, 0), ({'attributes.integer': {'in': [1, 2, 3]}}, 1), # object operators - ({'attributes.dict': {'has_key': 'k'}}, 0), - ({'attributes.dict': {'has_key': 'key1'}}, 1), + # Reenable when ``has_key`` operator is implemented, see https://github.com/aiidateam/aiida-core/issues/6498 + # ({'attributes.dict': {'has_key': 'k'}}, 0), + # ({'attributes.dict': {'has_key': 'key1'}}, 1), ), ids=json.dumps, ) diff --git a/tests/test_nodes.py b/tests/test_nodes.py index 6f64ab6d2d..bd971d37db 100644 --- a/tests/test_nodes.py +++ b/tests/test_nodes.py @@ -162,6 +162,7 @@ def init_profile(self, aiida_localhost): """Initialize the profile.""" self.computer = aiida_localhost + @pytest.mark.requires_psql def test_with_subclasses(self): from aiida.plugins import DataFactory From 297c3c9821b22c511e80b82e504064eff326130a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Jul 2024 07:46:55 +0200 Subject: [PATCH 23/42] Devops: Bump `docker/bake-action` from 4 to 5 (#6500) Updates `docker/bake-action` from 4 to 5 - [Release notes](https://github.com/docker/bake-action/releases) - [Commits](https://github.com/docker/bake-action/compare/v4...v5) --- .github/workflows/docker-build-test.yml | 2 +- .github/workflows/docker-build.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker-build-test.yml b/.github/workflows/docker-build-test.yml index a30ce9de63..7c48b38493 100644 --- a/.github/workflows/docker-build-test.yml +++ b/.github/workflows/docker-build-test.yml @@ -41,7 +41,7 @@ jobs: uses: docker/setup-buildx-action@v3 - name: Build images - uses: docker/bake-action@v4 + uses: docker/bake-action@v5 with: # Load to Docker engine for testing load: true diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml index 6201b93776..b278ec8349 100644 --- a/.github/workflows/docker-build.yml +++ b/.github/workflows/docker-build.yml @@ -54,7 +54,7 @@ jobs: - name: Build and upload to ghcr.io 📤 id: build - uses: docker/bake-action@v4 + uses: docker/bake-action@v5 with: push: true workdir: .docker/ From 61ae1a55b94c50979b4e47bb7572e1d4c9b2391f Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Mon, 1 Jul 2024 11:12:25 +0200 Subject: [PATCH 24/42] Dependencies: Update the requirements files (#6501) --- requirements/requirements-py-3.10.txt | 17 +++++++---------- requirements/requirements-py-3.11.txt | 19 ++++++++----------- requirements/requirements-py-3.12.txt | 7 ++++--- requirements/requirements-py-3.9.txt | 21 +++++++++------------ 4 files changed, 28 insertions(+), 36 deletions(-) diff --git a/requirements/requirements-py-3.10.txt b/requirements/requirements-py-3.10.txt index 3955a57530..7bd1c23ce4 100644 --- a/requirements/requirements-py-3.10.txt +++ b/requirements/requirements-py-3.10.txt @@ -4,14 +4,15 @@ # # pip-compile --extra=atomic_tools --extra=docs --extra=notebook --extra=rest --extra=tests --no-annotate --output-file=requirements/requirements-py-3.10.txt pyproject.toml # +accessible-pygments==0.0.5 aiida-export-migration-tests==0.9.0 aio-pika==6.8.1 aiormq==3.3.1 alabaster==0.7.13 alembic==1.11.1 aniso8601==9.0.1 +annotated-types==0.7.0 anyio==3.7.0 -appnope==0.1.3 archive-path==0.4.2 argon2-cffi==21.3.0 
argon2-cffi-bindings==21.2.0 @@ -34,7 +35,7 @@ click==8.1.3 click-spinner==0.1.10 comm==0.1.3 contourpy==1.1.0 -coverage==7.4.1 +coverage[toml]==7.4.1 cryptography==41.0.1 cycler==0.11.0 debugpy==1.6.7 @@ -44,7 +45,6 @@ deprecation==2.1.0 disk-objectstore==1.1.0 docstring-parser==0.15 docutils==0.20.1 -emmet-core==0.57.1 exceptiongroup==1.1.1 executing==1.2.0 fastjsonschema==2.17.1 @@ -66,6 +66,7 @@ ipywidgets==8.0.6 itsdangerous==2.1.2 jedi==0.18.2 jinja2==3.1.2 +joblib==1.4.2 jsonschema[format-nongpl]==3.2.0 jupyter==1.0.0 jupyter-cache==0.6.1 @@ -80,9 +81,8 @@ jupyterlab-widgets==3.0.7 kiwipy[rmq]==0.7.7 kiwisolver==1.4.4 latexcodec==2.0.1 -linkify-it-py==2.0.2 mako==1.2.4 -markdown-it-py[linkify,plugins]==3.0.0 +markdown-it-py==3.0.0 markupsafe==2.1.3 matplotlib==3.7.1 matplotlib-inline==0.1.6 @@ -91,7 +91,6 @@ mdurl==0.1.2 mistune==3.0.1 monty==2023.9.25 mpmath==1.3.0 -msgpack==1.0.5 multidict==6.0.4 myst-nb==1.0.0 myst-parser==2.0.0 @@ -133,6 +132,7 @@ pybtex==0.24.0 pycifrw==4.4.5 pycparser==2.21 pydantic==2.4.0 +pydantic-core==2.10.0 pydata-sphinx-theme==0.15.1 pygments==2.15.1 pymatgen==2023.9.25 @@ -147,7 +147,7 @@ pytest-benchmark==4.0.0 pytest-cov==4.1.0 pytest-datadir==1.4.1 pytest-regressions==2.4.2 -pytest-rerunfailures==12.0.0 +pytest-rerunfailures==12.0 pytest-timeout==2.2.0 python-dateutil==2.8.2 python-json-logger==2.0.7 @@ -161,7 +161,6 @@ qtpy==2.3.1 requests==2.31.0 rfc3339-validator==0.1.4 rfc3986-validator==0.1.1 -rich==13.4.2 ruamel-yaml==0.17.32 ruamel-yaml-clib==0.2.7 scipy==1.10.1 @@ -193,7 +192,6 @@ sympy==1.12 tabulate==0.9.0 tenacity==8.2.2 terminado==0.17.1 -textual==0.29.0 tinycss2==1.2.1 tomli==2.0.1 tornado==6.3.2 @@ -201,7 +199,6 @@ tqdm==4.65.0 traitlets==5.9.0 typing-extensions==4.6.3 tzdata==2023.3 -uc-micro-py==1.0.2 uncertainties==3.1.7 upf-to-json==0.9.5 urllib3==2.0.3 diff --git a/requirements/requirements-py-3.11.txt b/requirements/requirements-py-3.11.txt index feedaae17a..db6593c6ba 100644 --- a/requirements/requirements-py-3.11.txt +++ b/requirements/requirements-py-3.11.txt @@ -2,16 +2,17 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --extra=atomic_tools --extra=docs --extra=notebook --extra=rest --extra=tests --no-annotate --output-file=requirements/requirements-py-3.11.txt pyproject.toml +# pip-compile --extra=atomic_tools --extra=docs --extra=notebook --extra=rest --extra=tests --no-annotate --output-file=requirements/requirements-py-3.9.txt pyproject.toml # +accessible-pygments==0.0.5 aiida-export-migration-tests==0.9.0 aio-pika==6.8.1 aiormq==3.3.1 alabaster==0.7.13 alembic==1.11.1 aniso8601==9.0.1 +annotated-types==0.7.0 anyio==3.7.0 -appnope==0.1.3 archive-path==0.4.2 argon2-cffi==21.3.0 argon2-cffi-bindings==21.2.0 @@ -34,7 +35,7 @@ click==8.1.3 click-spinner==0.1.10 comm==0.1.3 contourpy==1.1.0 -coverage==7.4.1 +coverage[toml]==7.4.1 cryptography==41.0.1 cycler==0.11.0 debugpy==1.6.7 @@ -44,7 +45,6 @@ deprecation==2.1.0 disk-objectstore==1.1.0 docstring-parser==0.15 docutils==0.20.1 -emmet-core==0.57.1 executing==1.2.0 fastjsonschema==2.17.1 flask==2.3.2 @@ -65,6 +65,7 @@ ipywidgets==8.0.6 itsdangerous==2.1.2 jedi==0.18.2 jinja2==3.1.2 +joblib==1.4.2 jsonschema[format-nongpl]==3.2.0 jupyter==1.0.0 jupyter-cache==0.6.1 @@ -79,9 +80,8 @@ jupyterlab-widgets==3.0.7 kiwipy[rmq]==0.7.7 kiwisolver==1.4.4 latexcodec==2.0.1 -linkify-it-py==2.0.2 mako==1.2.4 -markdown-it-py[linkify,plugins]==3.0.0 +markdown-it-py==3.0.0 markupsafe==2.1.3 matplotlib==3.7.1 matplotlib-inline==0.1.6 
@@ -90,7 +90,6 @@ mdurl==0.1.2 mistune==3.0.1 monty==2023.9.25 mpmath==1.3.0 -msgpack==1.0.5 multidict==6.0.4 myst-nb==1.0.0 myst-parser==2.0.0 @@ -132,6 +131,7 @@ pybtex==0.24.0 pycifrw==4.4.5 pycparser==2.21 pydantic==2.4.0 +pydantic-core==2.10.0 pydata-sphinx-theme==0.15.1 pygments==2.15.1 pymatgen==2023.9.25 @@ -146,7 +146,7 @@ pytest-benchmark==4.0.0 pytest-cov==4.1.0 pytest-datadir==1.4.1 pytest-regressions==2.4.2 -pytest-rerunfailures==12.0.0 +pytest-rerunfailures==12.0 pytest-timeout==2.2.0 python-dateutil==2.8.2 python-json-logger==2.0.7 @@ -160,7 +160,6 @@ qtpy==2.3.1 requests==2.31.0 rfc3339-validator==0.1.4 rfc3986-validator==0.1.1 -rich==13.4.2 ruamel-yaml==0.17.32 ruamel-yaml-clib==0.2.7 scipy==1.10.1 @@ -192,14 +191,12 @@ sympy==1.12 tabulate==0.9.0 tenacity==8.2.2 terminado==0.17.1 -textual==0.29.0 tinycss2==1.2.1 tornado==6.3.2 tqdm==4.65.0 traitlets==5.9.0 typing-extensions==4.6.3 tzdata==2023.3 -uc-micro-py==1.0.2 uncertainties==3.1.7 upf-to-json==0.9.5 urllib3==2.0.3 diff --git a/requirements/requirements-py-3.12.txt b/requirements/requirements-py-3.12.txt index 3246ddc471..78f4e3a8f5 100644 --- a/requirements/requirements-py-3.12.txt +++ b/requirements/requirements-py-3.12.txt @@ -11,6 +11,7 @@ aiormq==3.3.1 alabaster==0.7.13 alembic==1.12.0 aniso8601==9.0.1 +annotated-types==0.7.0 anyio==4.0.0 archive-path==0.4.2 argon2-cffi==23.1.0 @@ -34,7 +35,7 @@ click==8.1.7 click-spinner==0.1.10 comm==0.1.4 contourpy==1.1.1 -coverage==7.4.1 +coverage[toml]==7.4.1 cryptography==41.0.5 cycler==0.12.1 debugpy==1.8.0 @@ -130,6 +131,7 @@ pybtex==0.24.0 pycifrw==4.4.5 pycparser==2.21 pydantic==2.4.0 +pydantic-core==2.10.0 pydata-sphinx-theme==0.15.1 pygments==2.16.1 pymatgen==2023.10.11 @@ -144,7 +146,7 @@ pytest-benchmark==4.0.0 pytest-cov==4.1.0 pytest-datadir==1.5.0 pytest-regressions==2.5.0 -pytest-rerunfailures==12.0.0 +pytest-rerunfailures==12.0 pytest-timeout==2.2.0 python-dateutil==2.8.2 python-json-logger==2.0.7 @@ -184,7 +186,6 @@ sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.9 sphinxext-rediraffe==0.2.7 sqlalchemy==2.0.23 -sqlalchemy-utils==0.37.9 stack-data==0.6.3 sympy==1.12 tabulate==0.9.0 diff --git a/requirements/requirements-py-3.9.txt b/requirements/requirements-py-3.9.txt index 5b0d89b5bc..a576ca238d 100644 --- a/requirements/requirements-py-3.9.txt +++ b/requirements/requirements-py-3.9.txt @@ -4,14 +4,15 @@ # # pip-compile --extra=atomic_tools --extra=docs --extra=notebook --extra=rest --extra=tests --no-annotate --output-file=requirements/requirements-py-3.9.txt pyproject.toml # +accessible-pygments==0.0.5 aiida-export-migration-tests==0.9.0 aio-pika==6.8.1 aiormq==3.3.1 alabaster==0.7.13 alembic==1.11.1 aniso8601==9.0.1 +annotated-types==0.7.0 anyio==3.7.0 -appnope==0.1.3 archive-path==0.4.2 argon2-cffi==21.3.0 argon2-cffi-bindings==21.2.0 @@ -34,7 +35,7 @@ click==8.1.3 click-spinner==0.1.10 comm==0.1.3 contourpy==1.1.0 -coverage==7.4.1 +coverage[toml]==7.4.1 cryptography==41.0.1 cycler==0.11.0 debugpy==1.6.7 @@ -44,8 +45,7 @@ deprecation==2.1.0 disk-objectstore==1.1.0 docstring-parser==0.15 docutils==0.20.1 -emmet-core==0.57.1 -exceptiongroup==1.1.1 +exceptiongroup==1.2.1 executing==1.2.0 fastjsonschema==2.17.1 flask==2.3.2 @@ -59,7 +59,7 @@ greenlet==2.0.2 idna==3.4 imagesize==1.4.1 importlib-metadata==6.8.0 -importlib-resources==5.12.0 +importlib-resources==6.4.0 iniconfig==2.0.0 ipykernel==6.23.2 ipython==8.14.0 @@ -68,6 +68,7 @@ ipywidgets==8.0.6 itsdangerous==2.1.2 jedi==0.18.2 jinja2==3.1.2 +joblib==1.4.2 jsonschema[format-nongpl]==3.2.0 
jupyter==1.0.0 jupyter-cache==0.6.1 @@ -82,9 +83,8 @@ jupyterlab-widgets==3.0.7 kiwipy[rmq]==0.7.7 kiwisolver==1.4.4 latexcodec==2.0.1 -linkify-it-py==2.0.2 mako==1.2.4 -markdown-it-py[linkify,plugins]==3.0.0 +markdown-it-py==3.0.0 markupsafe==2.1.3 matplotlib==3.7.1 matplotlib-inline==0.1.6 @@ -93,7 +93,6 @@ mdurl==0.1.2 mistune==3.0.1 monty==2023.9.25 mpmath==1.3.0 -msgpack==1.0.5 multidict==6.0.4 myst-nb==1.0.0 myst-parser==2.0.0 @@ -135,6 +134,7 @@ pybtex==0.24.0 pycifrw==4.4.5 pycparser==2.21 pydantic==2.4.0 +pydantic-core==2.10.0 pydata-sphinx-theme==0.15.1 pygments==2.15.1 pymatgen==2023.9.25 @@ -149,7 +149,7 @@ pytest-benchmark==4.0.0 pytest-cov==4.1.0 pytest-datadir==1.4.1 pytest-regressions==2.4.2 -pytest-rerunfailures==12.0.0 +pytest-rerunfailures==12.0 pytest-timeout==2.2.0 python-dateutil==2.8.2 python-json-logger==2.0.7 @@ -163,7 +163,6 @@ qtpy==2.3.1 requests==2.31.0 rfc3339-validator==0.1.4 rfc3986-validator==0.1.1 -rich==13.4.2 ruamel-yaml==0.17.32 ruamel-yaml-clib==0.2.7 scipy==1.10.1 @@ -195,7 +194,6 @@ sympy==1.12 tabulate==0.9.0 tenacity==8.2.2 terminado==0.17.1 -textual==0.29.0 tinycss2==1.2.1 tomli==2.0.1 tornado==6.3.2 @@ -203,7 +201,6 @@ tqdm==4.65.0 traitlets==5.9.0 typing-extensions==4.6.3 tzdata==2023.3 -uc-micro-py==1.0.2 uncertainties==3.1.7 upf-to-json==0.9.5 urllib3==2.0.3 From 076cd79bfb584872d6d1796d68b856ac8e5a5085 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Mon, 1 Jul 2024 12:03:55 +0200 Subject: [PATCH 25/42] Devops: Update pre-commit step in CD workflow to match CI The CI workflow updated the Python version and dependency requirements for the `pre-commit` job, however, did not apply the same changes to the CD workflow. This can cause differences in pre-commit causing the CI to pass but the CD to fail. --- .github/workflows/release.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 4a3f0e8a19..80f7e35326 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -40,8 +40,9 @@ jobs: - name: Install aiida-core and pre-commit uses: ./.github/actions/install-aiida-core with: - python-version: '3.10' + python-version: '3.11' extras: '[pre-commit]' + from-requirements: 'false' - name: Run pre-commit run: pre-commit run --all-files || ( git status --short ; git diff ; exit 1 ) From 2ccfeeebb1fff157e87599855704b25b17d64f22 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Wed, 17 Apr 2024 23:32:57 +0200 Subject: [PATCH 26/42] Release `v2.6.0` --- CHANGELOG.md | 253 ++++++++++++++++++++++++++++++++++++++++++ docs/source/conf.py | 2 +- src/aiida/__init__.py | 2 +- 3 files changed, 255 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a51a1320b0..b40c27e1dd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,258 @@ # Changelog +## v2.6.0 - 2024-07-01 + +This minor release comes with a number of features that are focused on user friendliness and ease-of-use of the CLI and the API. +The caching mechanism has received a number of improvements guaranteeing even greater savings of computational time. +For existing calculations to be valid cache sources in the new version, their hash has to be regenerated (see [Improvements and changes to caching](#improvements-and-changes-to-caching) for details). 
+
+- [Making RabbitMQ optional](#making-rabbitmq-optional)
+- [Simplifying profile setup](#simplifying-profile-setup)
+- [Improved test fixtures without services](#improved-test-fixtures-without-services)
+- [Improvements and changes to caching](#improvements-and-changes-to-caching)
+- [Programmatic syntax for query builder filters and projections](#programmatic-syntax-for-query-builder-filters-and-projections)
+- [Automated profile storage backups](#automated-profile-storage-backups)
+- [Full list of changes](#full-list-of-changes)
+  - [Features](#features)
+  - [Performance](#performance)
+  - [Changes](#changes)
+  - [Fixes](#fixes)
+  - [Deprecations](#deprecations)
+  - [Dependencies](#dependencies)
+  - [Refactoring](#refactoring)
+  - [Documentation](#documentation)
+  - [Devops](#devops)
+
+
+### Making RabbitMQ optional
+
+The RabbitMQ message broker service is now optional for running AiiDA.
+The requirement was added in AiiDA v1.0 when the engine was completely overhauled.
+Although it significantly improved the scaling and responsiveness, it also made it more difficult to start using AiiDA.
+As of v2.6, profiles can be configured without RabbitMQ, at the cost that the daemon cannot be used and all processes have to be run locally.
+
+### Simplifying profile setup
+
+With the removal of RabbitMQ as a hard requirement, combined with the storage plugins introduced in v2.5 that replace PostgreSQL with the serverless SQLite, it is now possible to set up a profile that requires no services.
+A new command is introduced, `verdi presto`, that automatically creates a profile with sensible defaults.
+In principle, this makes it possible to run just the following two commands on any operating system:
+```
+pip install aiida-core
+verdi presto
+```
+and get a working AiiDA installation that is ready to go.
+As a bonus, it also configures the localhost as a `Computer`.
+See the [documentation for more details](https://aiida.readthedocs.io/projects/aiida-core/en/v2.6.0/installation/guide_quick.html).
+
+### Improved test fixtures without services
+
+Until now, running tests would always require a fully functional profile, which meant that PostgreSQL and RabbitMQ had to be available.
+As described in the section above, it is now possible to set up a profile without these services.
+This new feature is leveraged to provide a set of `pytest` fixtures that supply a test profile that can be used on essentially any system that just has AiiDA installed.
+To start writing tests, simply create a `conftest.py` and import the fixtures with:
+```python
+pytest_plugins = 'aiida.tools.pytest_fixtures'
+```
+The new fixtures include the `aiida_profile` fixture, which is session-scoped and automatically loaded.
+The fixture creates a temporary test profile at the start of the test session and automatically deletes it when the session ends.
+For more information and an overview of all available fixtures, please refer to [the documentation on `pytest` fixtures](https://aiida.readthedocs.io/projects/aiida-core/en/v2.6.0/topics/plugins.html#plugin-test-fixtures).
+
+### Improvements and changes to caching
+
+A number of fixes and changes to the caching mechanism were introduced (see the [changes](#changes) subsection of the [full list of changes](#full-list-of-changes) for a more detailed overview).
+For existing calculations to be valid cache sources in the new version, their hash has to be regenerated by running `verdi node rehash`.
+Note that this can take a while for large databases.
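+For example, running the command without any arguments should regenerate the hashes of all eligible nodes (a sketch; see `verdi node rehash --help` for the available options):
+```
+verdi node rehash
+```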
+
+Since its introduction, the cache would essentially be reset each time AiiDA or any of the plugin packages was updated, since the versions of these packages were included in the calculation of the node hashes.
+This was originally done as a precaution, to err on the safe side and limit the possibility of false positives in cache hits.
+However, this strategy has turned out to be unnecessarily cautious and severely limited the effectiveness of caching.
+
+The package version information is no longer included in the hash and therefore no longer impacts the caching.
+This change does make false positives possible if the implementation of a `CalcJob` or `Parser` plugin changes significantly.
+Therefore, a mechanism is introduced that gives these plugins control to effectively reset the cache of existing nodes.
+Please refer to the [documentation on controlling caching](https://aiida.readthedocs.io/projects/aiida-core/en/v2.6.0/topics/provenance/caching.html#calculation-jobs-and-parsers) for more details.
+
+### Programmatic syntax for query builder filters and projections
+
+In the `QueryBuilder`, fields to filter on or project always had to be provided with strings:
+```python
+QueryBuilder().append(Node, filters={'label': 'some-label'}, project=['id', 'ctime'])
+```
+and it is not always trivial to know which fields exist that _can_ be filtered on or projected.
+In addition, there was a discrepancy for some fields, most notably the `pk` property, which had to be converted to `id` in the query builder syntax.
+
+These limitations have been solved: each class in AiiDA's ORM now defines the `fields` property, which allows these fields to be discovered programmatically.
+The example above would convert to:
+```python
+QueryBuilder().append(Node, filters={Node.fields.label: 'some-label'}, project=[Node.fields.pk, Node.fields.ctime])
+```
+The `fields` property provides tab-completion, allowing easy discovery of available fields for an ORM class in IDEs and interactive shells.
+The fields also make it possible to express logical conditions programmatically, and more.
+For more details, please refer to the [documentation on programmatic field syntax](https://aiida.readthedocs.io/projects/aiida-core/en/v2.6.0/howto/query.html#programmatic-syntax-for-filters).
+
+Data plugins can also define custom fields, adding on top of the fields inherited from their base class(es).
+The [documentation on data plugin fields](https://aiida.readthedocs.io/projects/aiida-core/en/v2.6.0/topics/data_types.html#fields) provides more information, but the API is currently in beta and guaranteed to change in an upcoming version.
+It is therefore recommended for plugin developers to hold off making use of this new API.
+
+### Automated profile storage backups
+
+A generic mechanism has been implemented to allow easily backing up the data of a profile.
+The command `verdi storage backup` automatically maintains a directory structure of previous backups, allowing efficient incremental backups.
+Note that the exact details of the backup mechanism depend on the storage plugin that is used by the profile, and not all storage plugins necessarily implement it.
+For now, the storage plugins `core.psql_dos` and `core.sqlite_dos` implement the functionality.
+For more information, please refer [to the documentation](https://aiida.readthedocs.io/projects/aiida-core/en/v2.6.0/howto/installation.html#backing-up-your-installation).
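+A typical invocation might look like the following sketch, where the destination path is an arbitrary example:
+```
+verdi storage backup /path/to/my-backups
+```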
+Please refer to [this section of the documentation](https://aiida.readthedocs.io/projects/aiida-core/en/v2.6.0/howto/installation.html#restoring-data-from-a-backup) for instructions to restore from a backup. + +### Full list of changes + +#### Features +- `CalcJob`: Allow to define order of copying of input files [[6898ff4d8]](https://github.com/aiidateam/aiida-core/commit/6898ff4d8c263cf08707c61411a005f6a7f731dd) +- `SqliteDosStorage`: Implement the backup functionality [[18e447c77]](https://github.com/aiidateam/aiida-core/commit/18e447c77f48a18f361e458186cd87b2355aea75) +- `SshTransport`: Return `FileNotFoundError` if destination parent does not exist [[d86bb38bf]](https://github.com/aiidateam/aiida-core/commit/d86bb38bf9a0ced8029f8a4b895e1a6be1ccb339) +- Add improved more configurable versions of `pytest` fixtures [[e3a60460e]](https://github.com/aiidateam/aiida-core/commit/e3a60460ef1208a5c46ecd6af35d891a88ee784e) +- Add the `orm.Entity.fields` interface for `QueryBuilder` [[4b9abe2bd]](https://github.com/aiidateam/aiida-core/commit/4b9abe2bd0bb82449547a3377c2b6dbc7c174123) +- CLI: `verdi computer test` make unexpected output check optional [[589a3b2c0]](https://github.com/aiidateam/aiida-core/commit/589a3b2c03d44cebd26e88243ca34fcdb0e23ff4) +- CLI: `verdi node graph generate` root nodes as arguments [[06f8f4cfb]](https://github.com/aiidateam/aiida-core/commit/06f8f4cfb0731ff699d5c01ad85418b6db0f6778) +- CLI: Add `--most-recent-node` option to `verdi process watch` [[72692fa5c]](https://github.com/aiidateam/aiida-core/commit/72692fa5cb667e2a7462770af18b7cedeaf8b3f0) +- CLI: Add `--sort/--no-sort` to `verdi code export` [[80c606890]](https://github.com/aiidateam/aiida-core/commit/80c60689063f1517c3de91d86eef80f7852667e3) +- CLI: Add `verdi process dump` and the `ProcessDumper` [[6291accf0]](https://github.com/aiidateam/aiida-core/commit/6291accf0538eafe7426e89bc4c1e9eb90ce0385) +- CLI: Add RabbitMQ options to `verdi profile setup` [[f553f805e]](https://github.com/aiidateam/aiida-core/commit/f553f805e86d766da6208eb1682f7cf12c7907ac) +- CLI: Add the `-M/--most-recent-node` option [[5aae874aa]](https://github.com/aiidateam/aiida-core/commit/5aae874aaa44459ce8cf3ddd3bf1a82d8a2e8d37) +- CLI: Add the `verdi computer export` command [[9e3ebf6ea]](https://github.com/aiidateam/aiida-core/commit/9e3ebf6ea55d883c7857a1dbafe398b9579cca03) +- CLI: Add the `verdi node list` command [[cf091e80f]](https://github.com/aiidateam/aiida-core/commit/cf091e80ff2b6aa03f41b56ba1976abb97298972) +- CLI: Add the `verdi presto` command [[6b6e1520f]](https://github.com/aiidateam/aiida-core/commit/6b6e1520f2d3807e366dd672e7917f381ea7b524) +- CLI: Add the `verdi profile configure-rabbitmq` command [[202a3ece9]](https://github.com/aiidateam/aiida-core/commit/202a3ece9705289a1f12c85e64cf90307ca85c39) +- CLI: Allow `verdi computer delete` to delete associated nodes [[348777571]](https://github.com/aiidateam/aiida-core/commit/3487775711e7412fb2cb82600fb266316d6ce12a) +- CLI: Allow multiple root nodes in `verdi node graph generate` [[f16c432af]](https://github.com/aiidateam/aiida-core/commit/f16c432af107b1f9c01a12e03cbd0a9ecc2744ad) +- Engine: Allow `CalcJob` monitors to return outputs [[b7e59a0db]](https://github.com/aiidateam/aiida-core/commit/b7e59a0dbc0dd629be5c8178e98c70e7a2c116e9) +- Make `postgres_cluster` and `config_psql_dos` fixtures configurable [[35d7ca63b]](https://github.com/aiidateam/aiida-core/commit/35d7ca63b44f051a26d3f96d84e043919eb3f101) +- Process: Add the `metadata.disable_cache` input 
[[4626b11f8]](https://github.com/aiidateam/aiida-core/commit/4626b11f85cd0d95a17d8f5766a90b88ddddd689) +- Storage: Add backup mechanism to the interface [[bf79f23ee]](https://github.com/aiidateam/aiida-core/commit/bf79f23eef66d362a34aac170577ba8f5c2088ba) +- Transports: fix overwrite behaviour for `puttree`/`gettree` [[a55451703]](https://github.com/aiidateam/aiida-core/commit/a55451703aa8f8d330b25bc5da95d41caf0db9ac) + +#### Performance +- CLI: Speed up tab-completion by lazily importing `Config` [[9524cda0b]](https://github.com/aiidateam/aiida-core/commit/9524cda0b8c742fb5bf740d7b0035e326eace28f) +- Improve import time of `aiida.orm` and `aiida.storage` [[fb9b6cc3b]](https://github.com/aiidateam/aiida-core/commit/fb9b6cc3b3df244549fdd78576c34f6d9dfd4568) +- ORM: Cache the logger adapter for `ProcessNode` [[1d104d06b]](https://github.com/aiidateam/aiida-core/commit/1d104d06b95da36c71cab132c7b6fec52a005e18) + +#### Changes +- Caching: `NodeCaching._get_objects_to_hash` return type to `dict` [[c9c7c4bd8]](https://github.com/aiidateam/aiida-core/commit/c9c7c4bd8e1cd306271b5cf267095d3cbd8aafe2) +- Caching: Add `CACHE_VERSION` attribute to `CalcJob` and `Parser` [[39d0f312d]](https://github.com/aiidateam/aiida-core/commit/39d0f312d212a642d1537ca89e7622e48a23e701) +- Caching: Include the node's class in objects to hash [[68ce11161]](https://github.com/aiidateam/aiida-core/commit/68ce111610c40e3d9146e128c0a698fc60b6e5e5) +- Caching: Make `NodeCaching._get_object_to_hash` public [[e33000402]](https://github.com/aiidateam/aiida-core/commit/e330004024ad5121f9bc82cbe972cd283f25fec8) +- Caching: Remove core and plugin information from hash calculation [[4c60bbef8]](https://github.com/aiidateam/aiida-core/commit/4c60bbef852eef55a06b48b813d3fbcc8fb5a43f) +- Caching: Rename `get_hash` to `compute_hash` [[b544f7cf9]](https://github.com/aiidateam/aiida-core/commit/b544f7cf95a0e6e698224f36c1bea57d1cd99e7d) +- CLI: Always do hard reset in `verdi daemon restart` [[8ac642410]](https://github.com/aiidateam/aiida-core/commit/8ac6424108d1528bd3279c81da62dd44855b6ebc) +- CLI: Change `--profile` to `-p/--profile-name` for `verdi profile setup` [[8ea203cd9]](https://github.com/aiidateam/aiida-core/commit/8ea203cd9b1d2fbb4a3b38ba67beec97bb8c7145) +- CLI: Let `-v/--verbosity` only affect `aiida` and `verdi` loggers [[487c6bf04]](https://github.com/aiidateam/aiida-core/commit/487c6bf047030ee19deed49d5fbf9a093253538e) +- Engine: Set the `to_aiida_type` as default inport port serializer [[2fa7a5305]](https://github.com/aiidateam/aiida-core/commit/2fa7a530511a94ead83d79669efed71706a0a472) +- `QueryBuilder`: Remove implementation for `has_key` in SQLite storage [[24cfbe27e]](https://github.com/aiidateam/aiida-core/commit/24cfbe27e7408b78fca8e6f69799ebad3659400b) + +#### Fixes +- `BandsData`: Use f-strings in `_prepare_gnuplot` [[dba117437]](https://github.com/aiidateam/aiida-core/commit/dba117437782abc6d11f9ef208923f7e70f79ed2) +- `BaseRestartWorkChain`: Fix handler overrides used only first iteration [[65786a6bd]](https://github.com/aiidateam/aiida-core/commit/65786a6bda1c74dfb4aea90becd0664de6b1abde) +- `SlurmScheduler`: Make detailed job info fields dynamic [[4f9774a68]](https://github.com/aiidateam/aiida-core/commit/4f9774a689b81a446fac37ad8281b2d854eefa7a) +- `SqliteDosStorage`: Fix exception when importing archive [[af0c260bb]](https://github.com/aiidateam/aiida-core/commit/af0c260bb1c32c3b33c50175d790907774561b3e) +- `StructureData`: Fix the pbc constraints of `get_pymatgen_structure` 
[[adcce4bcd]](https://github.com/aiidateam/aiida-core/commit/adcce4bcd0b59c8371be73058a060bedcaba40f6) +- Archive: Automatically create nested output directories [[212f6163b]](https://github.com/aiidateam/aiida-core/commit/212f6163b03b8762509ae2230c30172af8c02fed) +- Archive: Respect `filter_size` in query for existing nodes [[ef60b66aa]](https://github.com/aiidateam/aiida-core/commit/ef60b66aa3ce76d654abe5e7caafef3f221defd0) +- CLI: Ensure deprecation warnings are printed before any prompts [[deb293d0e]](https://github.com/aiidateam/aiida-core/commit/deb293d0e6a566256fac5069881de4846d77f6d1) +- CLI: Fix `verdi archive create --dry-run` for empty file repository [[cc96c9d04]](https://github.com/aiidateam/aiida-core/commit/cc96c9d043c6616a068a5498f557fa21a728eb96) +- CLI: Fix `verdi plugin list` incorrectly not displaying description [[e952d7717]](https://github.com/aiidateam/aiida-core/commit/e952d7717c1d8001555e8d19f54f4fa349da6c6e) +- CLI: Fix `verdi process [show|report|status|watch|call-root]` no output [[a56a1389d]](https://github.com/aiidateam/aiida-core/commit/a56a1389dee5cb9ae70a5511d77aad248ea21731) +- CLI: Fix `verdi process list` if no available workers [[b44afcb3c]](https://github.com/aiidateam/aiida-core/commit/b44afcb3c1a7efa452d4e72aa6f8a615f652aaa4) +- CLI: Fix `verdi quicksetup` when profiles exist where storage is not `core.psql_dos` [[6cb91c181]](https://github.com/aiidateam/aiida-core/commit/6cb91c18163ac6228ed4a64c1c467dfd0398a624) +- CLI: Fix dry-run resulting in critical error in `verdi archive import` [[36991c6c8]](https://github.com/aiidateam/aiida-core/commit/36991c6c84f4ba0b4553e8cd6689bbc1815dbd35) +- CLI: Fix logging not showing in `verdi daemon worker` [[9bd8585bd]](https://github.com/aiidateam/aiida-core/commit/9bd8585bd5e7989e24646a0018710e86836e5a9f) +- CLI: Fix the `ctx.obj.profile` attribute not being initialized [[8a286f26e]](https://github.com/aiidateam/aiida-core/commit/8a286f26e8d303c498ac2eabd49be5f1f4ced9ef) +- CLI: Hide misleading message for `verdi archive create --test-run` [[7e42d7aa7]](https://github.com/aiidateam/aiida-core/commit/7e42d7aa7d16fa9e81cbd300ada14e4dea2426ce) +- CLI: Improve error message of `PathOrUrl` and `FileOrUrl` [[ffc6e4f70]](https://github.com/aiidateam/aiida-core/commit/ffc6e4f706277854dbd454d6f3164cec31e7819a) +- CLI: Only configure logging in `set_log_level` callback once [[66a2dcedd]](https://github.com/aiidateam/aiida-core/commit/66a2dcedd0a9428b5b2218b8c82bad9c9aff4956) +- CLI: Unify help of `verdi process` commands [[d91e0a58d]](https://github.com/aiidateam/aiida-core/commit/d91e0a58dabfd242b5f886d692c8761499a6719c) +- Config: Set existing user as default for read-only storages [[e66592509]](https://github.com/aiidateam/aiida-core/commit/e665925097bb3344fde4bcc66ee185a2d9207ac3) +- Config: Use UUID in `Manager.load_profile` to identify profile [[b01038bf1]](https://github.com/aiidateam/aiida-core/commit/b01038bf1fca7d33c4915aee904acea89a847614) +- Daemon: Log the worker's path and Python interpreter [[ae2094169]](https://github.com/aiidateam/aiida-core/commit/ae209416996ec361c474aeaf0fa06f49dd59f296) +- Docker: Start and stop daemon only when a profile exists [[0a5b20023]](https://github.com/aiidateam/aiida-core/commit/0a5b200236419d8caf8e05bb04ba80d03a438e03) +- Engine: Add positional inputs for `Process.submit` [[d1131fe94]](https://github.com/aiidateam/aiida-core/commit/d1131fe9450972080207db6e9615784490b3252b) +- Engine: Catch `NotImplementedError`in `get_process_state_change_timestamp` 
[[04926fe20]](https://github.com/aiidateam/aiida-core/commit/04926fe20da15065f8f086f1ff3cb14cc163aa08) +- Engine: Fix paused work chains not showing it in process status [[40b22d593]](https://github.com/aiidateam/aiida-core/commit/40b22d593875b97355996bbfc15e2850ad1f0495) +- Fix passwords containing `@` not being accepted for Postgres databases [[d14c14db2]](https://github.com/aiidateam/aiida-core/commit/d14c14db2f82d3a678e9747bd463ec1a61642120) +- ORM: Correct field type of `InstalledCode` and `PortableCode` models [[0079cc1e4]](https://github.com/aiidateam/aiida-core/commit/0079cc1e4b46c61edf2323b2d42af46367fe04b6) +- ORM: Fix `ProcessNode.get_builder_restart` [[0dee9d8ef]](https://github.com/aiidateam/aiida-core/commit/0dee9d8efba5c48615e8510f5ada706724b4a2e8) +- ORM: Fix deprecation warning being shown for new code types [[a9155713b]](https://github.com/aiidateam/aiida-core/commit/a9155713bbb10e57fe91cd320e2a12391d098a46) +- Runner: Close event loop in `Runner.close()` [[53cc45837]](https://github.com/aiidateam/aiida-core/commit/53cc458377685e54179eb1e1b73bb0383c8dae13) + +#### Deprecations +- CLI: Deprecate `verdi profile setdefault` and rename to `verdi profile set-default` [[ab48a4f62]](https://github.com/aiidateam/aiida-core/commit/ab48a4f627b4c9eec9133b5efa9fb888ce2c4914) +- CLI: Deprecate accepting `0` for `default_mpiprocs_per_machine` [[acec0c190]](https://github.com/aiidateam/aiida-core/commit/acec0c190cbb45ba267c6eb8ee7ceba18cf3302b) +- CLI: Deprecate the `deprecated_command` decorator [[4c11c0616]](https://github.com/aiidateam/aiida-core/commit/4c11c0616c583236119f838a1780a606c58b4ee2) +- CLI: Remove the deprecated `verdi database` command [[3dbde9e31]](https://github.com/aiidateam/aiida-core/commit/3dbde9e311781509b738202ad6f1de3bbd4b7a82) +- ORM: Undo deprecation of `Code.get_description` [[1b13014b1]](https://github.com/aiidateam/aiida-core/commit/1b13014b14274024dcb6bb0a721eb62665567987) + +### Dependencies +- Update `tabulate>=0.8.0,<0.10.0` [[6db2f4060]](https://github.com/aiidateam/aiida-core/commit/6db2f4060d4ece4552f5fe757c0f7d938810f4d1) + +#### Refactoring +- Abstract message broker functionality [[69389e038]](https://github.com/aiidateam/aiida-core/commit/69389e0387369d8437e1219487b88430b7b2e679) +- Config: Refactor `get_configuration_directory_from_envvar` [[65739f524]](https://github.com/aiidateam/aiida-core/commit/65739f52446087439ba93158eb948b58ed081ce5) +- Config: Refactor the `create_profile` function and method [[905e93444]](https://github.com/aiidateam/aiida-core/commit/905e93444cf996461e679cd458511d1c471a7e02) +- Engine: Refactor handling of `remote_folder` and `retrieved` outputs [[28adacaf8]](https://github.com/aiidateam/aiida-core/commit/28adacaf8ae21357bf6e5a2a48c43ed56d3bd78b) +- ORM: Switch to `pydantic` for code schema definition [[06189d528]](https://github.com/aiidateam/aiida-core/commit/06189d528c2362516f42e0d48840882812b97fe4) +- Replace deprecated `IOError` with `OSError` [[7f9129fd1]](https://github.com/aiidateam/aiida-core/commit/7f9129fd193374bdbeaa7ba4dd8c3cdf706db97d) +- Storage: Move profile locking to the abstract base class [[ea5f51bcb]](https://github.com/aiidateam/aiida-core/commit/ea5f51bcb6af172eb1a754df3981003bf7bad959) + +#### Documentation +- Add more instructions on how to use docker image [[aaf44afcc]](https://github.com/aiidateam/aiida-core/commit/aaf44afcce0f90fff2eb38bc47d28b4adf87db24) +- Add the updated cheat sheet [[09f9058a7]](https://github.com/aiidateam/aiida-core/commit/09f9058a7444f3ac1d3f243b608fa3f24f771f27) +- 
Add tips for common problems with conda PostgreSQL setup [[cd5313825]](https://github.com/aiidateam/aiida-core/commit/cd5313825afdb1771ca19d899567e4ed4774a2bc) +- Customize the color scheme through custom style sheet [[a6cf7fc7e]](https://github.com/aiidateam/aiida-core/commit/a6cf7fc7e02a48a7e3b9c4ba6ce5e2cd413e6b23) +- Docs: Clarify `Transport.copy` requires `recursive=True` if source is a directory [[310ff1db7]](https://github.com/aiidateam/aiida-core/commit/310ff1db77bc75b6cadedf77394b96af05456f43) +- Fix example of the `entry_points` fixture [[081fc5547]](https://github.com/aiidateam/aiida-core/commit/081fc5547370e1b5a19b1fb507681091c632bb7a) +- Fixing several small issues [[6a3a59b29]](https://github.com/aiidateam/aiida-core/commit/6a3a59b29ba64401828d9ab51dc123060868278b) +- Minor cheatsheet update for v2.6 release [[c3cc169c4]](https://github.com/aiidateam/aiida-core/commit/c3cc169c487a88e2357b7377e897f0521c23f05a) +- Reorganize the tutorial content [[5bd960efa]](https://github.com/aiidateam/aiida-core/commit/5bd960efae5a7f916b978a420f5f43501c9bc529) +- Rework the installation section [[0ee0a0c6a]](https://github.com/aiidateam/aiida-core/commit/0ee0a0c6ae13588e82edf1cf9e8cb9857c94c31b) +- Standardize usage of `versionadded` directive [[bf5dac848]](https://github.com/aiidateam/aiida-core/commit/bf5dac8484638d7ba5c492e91975b5fcc0cc9770) +- Update twitter logo [[5e4f60d83]](https://github.com/aiidateam/aiida-core/commit/5e4f60d83160774ca83defe4bf1f6c6381aaa1a0) +- Use uv installer in readthedocs build [[be0db3cc4]](https://github.com/aiidateam/aiida-core/commit/be0db3cc49506294ae1845b6e746e40cd76f39a9) + +#### Devops +- Add `check-jsonschema` pre-commit hook for GHA workflows [[14c5bb0f7]](https://github.com/aiidateam/aiida-core/commit/14c5bb0f764f0fd7933df205aa22d61c85ec0cf2) +- Add Dependabot config for maintaining GH actions [[0812f4b9e]](https://github.com/aiidateam/aiida-core/commit/0812f4b9eeffdff5a8c3d0802aea94c8919d9922) +- Add docker image `aiida-core-dev` for development [[6d0984109]](https://github.com/aiidateam/aiida-core/commit/6d0984109478ec1c0fd96dfd1d3f2b54e0b75dd2) +- Add Python 3.12 tox environment [[6b0d43960]](https://github.com/aiidateam/aiida-core/commit/6b0d4396068a43b6823eca8c78b9048044b0b4b8) +- Add the `slurm` service to nightly workflow [[5460a0414]](https://github.com/aiidateam/aiida-core/commit/5460a0414d55e3531eb86e6906ee963a6b712aae) +- Add typing to `aiida.common.hashing` [[ba21ba1d4]](https://github.com/aiidateam/aiida-core/commit/ba21ba1d40a76df73a2e27ce6f1a4f68aba7fb9a) +- Add workflow to build Docker images on PRs from forks [[23d2aa5ee]](https://github.com/aiidateam/aiida-core/commit/23d2aa5ee3c08438cfc4b4734e9670e19c090150) +- Address internal deprecation warnings [[ceed7d55d]](https://github.com/aiidateam/aiida-core/commit/ceed7d55dfb7df8dbe52c4557d145593d83f788a) +- Allow unit test suite to be ran against SQLite [[0dc8bbcb2]](https://github.com/aiidateam/aiida-core/commit/0dc8bbcb261b745683bc542c1aced2412ebd66a0) +- Bump the gha-dependencies group with 4 updates [[ccb56286c]](https://github.com/aiidateam/aiida-core/commit/ccb56286c40f6be0d61a0c62442993e43faf1ba6) +- Dependencies: Update the requirements files [[61ae1a55b]](https://github.com/aiidateam/aiida-core/commit/61ae1a55b94c50979b4e47bb7572e1d4c9b2391f) +- Disable code coverage in `test-install.yml` [[4cecda517]](https://github.com/aiidateam/aiida-core/commit/4cecda5177c456cee252c16295416c3842bb5d2d) +- Do not pin the mamba version 
[[82bba1307]](https://github.com/aiidateam/aiida-core/commit/82bba130792f6c965f0ede8b221eee70fd01d9f1) +- Fix Docker build not defining `REGISTRY` [[e7953fd4d]](https://github.com/aiidateam/aiida-core/commit/e7953fd4dd14875e380b125b99f86c12ce15359b) +- Fix publishing to DockerHub using incorrect secret name [[9c9ff7986]](https://github.com/aiidateam/aiida-core/commit/9c9ff79865225b125ba5f9fe23969d4c2c8fb9b2) +- Fix Slack notification for nightly tests [[082589f45]](https://github.com/aiidateam/aiida-core/commit/082589f456201fbd79d3df809e2cfc5fb5f27922) +- Fix the `test-install.yml` workflow [[22ea06362]](https://github.com/aiidateam/aiida-core/commit/22ea06362e9de5d314f103332da2e25ae6080f61) +- Fix the Docker builds [[3404c0192]](https://github.com/aiidateam/aiida-core/commit/3404c01925da941c08f246e231b6587f53ce445b) +- Increase timeout for the `test-install` workflow [[e36a3f11f]](https://github.com/aiidateam/aiida-core/commit/e36a3f11fdd165eea3af9f3337382e1bbd181390) +- Move RabbitMQ CI to nightly and update versions [[b47a56698]](https://github.com/aiidateam/aiida-core/commit/b47a56698e8fdf350a10c7abfd8ba00443fabd8d) +- Refactor the GHA Docker build [[e47932ee9]](https://github.com/aiidateam/aiida-core/commit/e47932ee9e0833dca546c7c7b5b584f2687d9073) +- Remove `verdi tui` from CLI reference documentation [[1b4a19a44]](https://github.com/aiidateam/aiida-core/commit/1b4a19a44461271aea58e54acd93e896220b413d) +- Run Docker workflow only for pushes to origin [[b1a714155]](https://github.com/aiidateam/aiida-core/commit/b1a714155ec9e51263e453a4354934fa91d04f33) +- Tests: Convert hierarchy functions into fixtures [[a02abc470]](https://github.com/aiidateam/aiida-core/commit/a02abc4701e81b75284164510be966ff0fd04dab) +- Tests: extend `node_and_calc_info` fixture to `core.ssh` [[9cf28f208]](https://github.com/aiidateam/aiida-core/commit/9cf28f20875fe6c3b0f2844bff16415b1dfc7b6f) +- Tests: Remove test classes for transport plugins [[b77e51f8c]](https://github.com/aiidateam/aiida-core/commit/b77e51f8c15c7ddea80d7d6328737abb705c6ce8) +- Tests: Unskip test in `tests/cmdline/commands/test_archive_import.py` [[7b7958c7a]](https://github.com/aiidateam/aiida-core/commit/7b7958c7aee150162cb4db0562a7352764a94c04) +- Update codecov action [[fc2a84d9b]](https://github.com/aiidateam/aiida-core/commit/fc2a84d9bd045d48d46511153dfde389070bf552) +- Update deprecated `whitelist_externals` option in tox config [[8feef5189]](https://github.com/aiidateam/aiida-core/commit/8feef5189ab9a2ba4b358bb6937d5d7c3f555ad8) +- Update pre-commit hooks [[3dda84ff3]](https://github.com/aiidateam/aiida-core/commit/3dda84ff3057a97e422b809a6adc778cbf60c125) +- Update pre-commit requirement `ruff==0.3.5` [[acd54543d]](https://github.com/aiidateam/aiida-core/commit/acd54543dffca05df7189f36c71afd2bb0065f34) +- Update requirements `mypy` and `pre-commit`[[04b3260a0]](https://github.com/aiidateam/aiida-core/commit/04b3260a098f061301edb0f56f1675fe9283b41b) +- Update requirements to address deprecation warnings [[566f681f7]](https://github.com/aiidateam/aiida-core/commit/566f681f72426a9a08200ff1d86c604f4c37bbcf) +- Use `uv` to install package in CI and CD [[73a734ae3]](https://github.com/aiidateam/aiida-core/commit/73a734ae3cd0977a97c631f97ddb781fa293864a) +- Use recursive dependencies for `pre-commit` extra [[6564e78dd]](https://github.com/aiidateam/aiida-core/commit/6564e78ddb349b89f6a3e9bfa81ce357ce865961) + ## v2.5.1 - 2024-01-31 diff --git a/docs/source/conf.py b/docs/source/conf.py index 745ed79c30..d017051cb3 100644 --- 
a/docs/source/conf.py +++ b/docs/source/conf.py @@ -97,7 +97,7 @@ ipython_mplbackend = '' myst_enable_extensions = ['colon_fence', 'deflist'] -myst_heading_anchors = 3 +myst_heading_anchors = 4 nb_execution_show_tb = 'READTHEDOCS' in os.environ nb_merge_streams = True nb_mime_priority_overrides = [ diff --git a/src/aiida/__init__.py b/src/aiida/__init__.py index 2edab0cd02..4d2bcab3a9 100644 --- a/src/aiida/__init__.py +++ b/src/aiida/__init__.py @@ -27,7 +27,7 @@ 'For further information please visit http://www.aiida.net/. All rights reserved.' ) __license__ = 'MIT license, see LICENSE.txt file.' -__version__ = '2.5.1.post0' +__version__ = '2.6.0' __authors__ = 'The AiiDA team.' __paper__ = ( 'S. P. Huber et al., "AiiDA 1.0, a scalable computational infrastructure for automated reproducible workflows and ' From 9fe8fd2e0b88e746ee2156eccb71b7adbab6b2c5 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Mon, 1 Jul 2024 13:33:46 +0200 Subject: [PATCH 27/42] Fixtures: Make `pgtest` truly an optional dependency (#6502) The pytest fixtures were improved to allow running with a `core.sqlite_dos` storage for the test profile, making PostgreSQL completely optional. However, the current fixture still imports the `pgtest` package at module level making it a requirement, despite it only being relevant when running the tests with a `core.psql_dos` storage plugin. Here the import is moved inside the `PostgresCluster._create` method which is only called when the test suite actually uses a PSQL based storage plugin. --- src/aiida/tools/pytest_fixtures/storage.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/aiida/tools/pytest_fixtures/storage.py b/src/aiida/tools/pytest_fixtures/storage.py index 2f13cf25a1..4565e621b4 100644 --- a/src/aiida/tools/pytest_fixtures/storage.py +++ b/src/aiida/tools/pytest_fixtures/storage.py @@ -7,7 +7,6 @@ from uuid import uuid4 import pytest -from pgtest.pgtest import PGTest if t.TYPE_CHECKING: from pgtest.pgtest import PGTest @@ -19,6 +18,8 @@ def __init__(self): self.cluster = None def _create(self): + from pgtest.pgtest import PGTest + try: self.cluster = PGTest() except OSError as e: @@ -59,7 +60,6 @@ def create_database( return postgres_config -# TODO: Update docstring accordingly @pytest.fixture(scope='session') def postgres_cluster(): """Create a temporary and isolated PostgreSQL cluster using ``pgtest`` and cleanup after the yield. From 9c26ce7c1fc024265a8ffb986ae50783823aa1a7 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Mon, 1 Jul 2024 13:44:25 +0200 Subject: [PATCH 28/42] Release `v2.6.1` --- CHANGELOG.md | 6 ++++++ src/aiida/__init__.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b40c27e1dd..fbc5ddf965 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## v2.6.1 - 2024-07-01 + +### Fixes: +- Fixtures: Make `pgtest` truly an optional dependency [[9fe8fd2e0]](https://github.com/aiidateam/aiida-core/commit/9fe8fd2e0b88e746ee2156eccb71b7adbab6b2c5) + + ## v2.6.0 - 2024-07-01 This minor release comes with a number of features that are focused on user friendliness and ease-of-use of the CLI and the API. diff --git a/src/aiida/__init__.py b/src/aiida/__init__.py index 4d2bcab3a9..5067f789e2 100644 --- a/src/aiida/__init__.py +++ b/src/aiida/__init__.py @@ -27,7 +27,7 @@ 'For further information please visit http://www.aiida.net/. All rights reserved.' ) __license__ = 'MIT license, see LICENSE.txt file.' 
-__version__ = '2.6.0' +__version__ = '2.6.1' __authors__ = 'The AiiDA team.' __paper__ = ( 'S. P. Huber et al., "AiiDA 1.0, a scalable computational infrastructure for automated reproducible workflows and ' From 14bb05f4b4e7fbda86682ea2cf4e3881b3a3e8dc Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 2 Jul 2024 11:18:22 +0200 Subject: [PATCH 29/42] Devops: Update pre-commit dependencies (#6504) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - [github.com/python-jsonschema/check-jsonschema: 0.28.2 → 0.28.6](https://github.com/python-jsonschema/check-jsonschema/compare/0.28.2...0.28.6) - [github.com/astral-sh/ruff-pre-commit: v0.4.1 → v0.5.0](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.1...v0.5.0) --- .pre-commit-config.yaml | 4 ++-- src/aiida/calculations/transfer.py | 2 +- src/aiida/engine/processes/functions.py | 2 +- src/aiida/orm/nodes/data/remote/base.py | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index cad92cb781..185d5698fb 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -26,7 +26,7 @@ repos: exclude: *exclude_pre_commit_hooks - repo: https://github.com/python-jsonschema/check-jsonschema - rev: 0.28.2 + rev: 0.28.6 hooks: - id: check-github-workflows @@ -37,7 +37,7 @@ repos: args: [--line-length=120, --fail-on-change] - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.1 + rev: v0.5.0 hooks: - id: ruff-format exclude: &exclude_ruff > diff --git a/src/aiida/calculations/transfer.py b/src/aiida/calculations/transfer.py index 04290e0606..fae76aa5ed 100644 --- a/src/aiida/calculations/transfer.py +++ b/src/aiida/calculations/transfer.py @@ -55,7 +55,7 @@ def validate_instructions(instructions, _): return errmsg -def validate_transfer_inputs(inputs, _): +def validate_transfer_inputs(inputs, _ctx): """Check that the instructions dict and the source nodes are consistent""" source_nodes = inputs['source_nodes'] instructions = inputs['instructions'] diff --git a/src/aiida/engine/processes/functions.py b/src/aiida/engine/processes/functions.py index 11bd43946c..509e2d0258 100644 --- a/src/aiida/engine/processes/functions.py +++ b/src/aiida/engine/processes/functions.py @@ -85,7 +85,7 @@ def get_stack_size(size: int = 2) -> int: # type: ignore[return] """ frame = sys._getframe(size) try: - for size in itertools.count(size, 8): + for size in itertools.count(size, 8): # noqa: PLR1704 frame = frame.f_back.f_back.f_back.f_back.f_back.f_back.f_back.f_back # type: ignore[assignment,union-attr] except AttributeError: while frame: # type: ignore[truthy-bool] diff --git a/src/aiida/orm/nodes/data/remote/base.py b/src/aiida/orm/nodes/data/remote/base.py index 760924a725..9147a58d10 100644 --- a/src/aiida/orm/nodes/data/remote/base.py +++ b/src/aiida/orm/nodes/data/remote/base.py @@ -175,8 +175,8 @@ def _clean(self, transport=None): remote_dir = self.get_remote_path() if transport is None: - with self.get_authinfo().get_transport() as transport: - clean_remote(transport, remote_dir) + with self.get_authinfo().get_transport() as _transport: + clean_remote(_transport, remote_dir) else: if transport.hostname != self.computer.hostname: raise ValueError( From d86017f42cb5359d0272694247756d547057a663 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Fri, 14 Oct 2022 09:37:44 +0200 Subject: [PATCH 30/42] Dependencies: Update requirements for `kiwipy` and `plumpy` The new 
version `kiwipy==0.8` and `plumpy==0.22` provide compatiblity with newer versions of `aio-pika==8.0` which comes with various connection stability improvements. The only known problem is that `Communicator.close()` times out if at least one process has been run. A test is added to capture this behavior in `tests/manage/test_manager.py` which currently fails as a `TimeoutError` is thrown. A lot of debugging has not yet led to finding the cause nor a solution. Since this behavior only seems to be appearing in the tests and does not seem to affect regular usage, the upgrade is accepted regardless. --- environment.yml | 5 ++-- pyproject.toml | 5 ++-- requirements/requirements-py-3.10.txt | 10 ++++---- requirements/requirements-py-3.11.txt | 10 ++++---- requirements/requirements-py-3.12.txt | 10 ++++---- requirements/requirements-py-3.9.txt | 10 ++++---- src/aiida/brokers/rabbitmq/broker.py | 2 +- src/aiida/engine/processes/process.py | 5 ++-- tests/manage/test_manager.py | 33 +++++++++++++++++++++++++++ 9 files changed, 61 insertions(+), 29 deletions(-) create mode 100644 tests/manage/test_manager.py diff --git a/environment.yml b/environment.yml index 98dd997ba1..99d7748c64 100644 --- a/environment.yml +++ b/environment.yml @@ -8,7 +8,6 @@ dependencies: - python~=3.9 - alembic~=1.2 - archive-path~=0.4.2 -- aio-pika~=6.6 - circus~=0.18.0 - click-spinner~=0.1.8 - click~=8.1 @@ -19,11 +18,11 @@ dependencies: - ipython>=7 - jedi<0.19 - jinja2~=3.0 -- kiwipy[rmq]~=0.7.7 +- kiwipy[rmq]~=0.8.4 - importlib-metadata~=6.0 - numpy~=1.21 - paramiko>=2.7.2,~=2.7 -- plumpy~=0.21.6 +- plumpy~=0.22.3 - pgsu~=0.2.1 - psutil~=5.6 - psycopg2-binary~=2.8 diff --git a/pyproject.toml b/pyproject.toml index c70c7a96de..5f31cef2a0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -20,7 +20,6 @@ classifiers = [ dependencies = [ 'alembic~=1.2', 'archive-path~=0.4.2', - 'aio-pika~=6.6', 'circus~=0.18.0', 'click-spinner~=0.1.8', 'click~=8.1', @@ -31,11 +30,11 @@ dependencies = [ 'ipython>=7', 'jedi<0.19', 'jinja2~=3.0', - 'kiwipy[rmq]~=0.7.7', + 'kiwipy[rmq]~=0.8.4', 'importlib-metadata~=6.0', 'numpy~=1.21', 'paramiko~=2.7,>=2.7.2', - 'plumpy~=0.21.6', + 'plumpy~=0.22.3', 'pgsu~=0.2.1', 'psutil~=5.6', 'psycopg2-binary~=2.8', diff --git a/requirements/requirements-py-3.10.txt b/requirements/requirements-py-3.10.txt index 7bd1c23ce4..d6ca92f6c3 100644 --- a/requirements/requirements-py-3.10.txt +++ b/requirements/requirements-py-3.10.txt @@ -6,8 +6,8 @@ # accessible-pygments==0.0.5 aiida-export-migration-tests==0.9.0 -aio-pika==6.8.1 -aiormq==3.3.1 +aio-pika==9.4.0 +aiormq==6.8.0 alabaster==0.7.13 alembic==1.11.1 aniso8601==9.0.1 @@ -78,7 +78,7 @@ jupyter-server==2.6.0 jupyter-server-terminals==0.4.4 jupyterlab-pygments==0.2.2 jupyterlab-widgets==3.0.7 -kiwipy[rmq]==0.7.7 +kiwipy[rmq]==0.8.4 kiwisolver==1.4.4 latexcodec==2.0.1 mako==1.2.4 @@ -106,7 +106,7 @@ numpy==1.25.0 overrides==7.3.1 packaging==23.1 palettable==3.3.3 -pamqp==2.3.0 +pamqp==3.3.0 pandas==2.0.2 pandocfilters==1.5.0 paramiko==2.12.0 @@ -120,7 +120,7 @@ pillow==9.5.0 platformdirs==3.6.0 plotly==5.15.0 pluggy==1.0.0 -plumpy==0.21.8 +plumpy==0.22.3 prometheus-client==0.17.0 prompt-toolkit==3.0.38 psutil==5.9.5 diff --git a/requirements/requirements-py-3.11.txt b/requirements/requirements-py-3.11.txt index db6593c6ba..95347a0980 100644 --- a/requirements/requirements-py-3.11.txt +++ b/requirements/requirements-py-3.11.txt @@ -6,8 +6,8 @@ # accessible-pygments==0.0.5 aiida-export-migration-tests==0.9.0 -aio-pika==6.8.1 -aiormq==3.3.1 +aio-pika==9.4.0 
+aiormq==6.8.0 alabaster==0.7.13 alembic==1.11.1 aniso8601==9.0.1 @@ -77,7 +77,7 @@ jupyter-server==2.6.0 jupyter-server-terminals==0.4.4 jupyterlab-pygments==0.2.2 jupyterlab-widgets==3.0.7 -kiwipy[rmq]==0.7.7 +kiwipy[rmq]==0.8.4 kiwisolver==1.4.4 latexcodec==2.0.1 mako==1.2.4 @@ -105,7 +105,7 @@ numpy==1.25.0 overrides==7.3.1 packaging==23.1 palettable==3.3.3 -pamqp==2.3.0 +pamqp==3.3.0 pandas==2.0.2 pandocfilters==1.5.0 paramiko==2.12.0 @@ -119,7 +119,7 @@ pillow==9.5.0 platformdirs==3.6.0 plotly==5.15.0 pluggy==1.0.0 -plumpy==0.21.8 +plumpy==0.22.3 prometheus-client==0.17.0 prompt-toolkit==3.0.38 psutil==5.9.5 diff --git a/requirements/requirements-py-3.12.txt b/requirements/requirements-py-3.12.txt index 78f4e3a8f5..15d59944df 100644 --- a/requirements/requirements-py-3.12.txt +++ b/requirements/requirements-py-3.12.txt @@ -6,8 +6,8 @@ # accessible-pygments==0.0.4 aiida-export-migration-tests==0.9.0 -aio-pika==6.8.1 -aiormq==3.3.1 +aio-pika==9.4.0 +aiormq==6.8.0 alabaster==0.7.13 alembic==1.12.0 aniso8601==9.0.1 @@ -77,7 +77,7 @@ jupyter-server==2.8.0 jupyter-server-terminals==0.4.4 jupyterlab-pygments==0.2.2 jupyterlab-widgets==3.0.9 -kiwipy[rmq]==0.7.8 +kiwipy[rmq]==0.8.4 kiwisolver==1.4.5 latexcodec==2.0.1 mako==1.2.4 @@ -105,7 +105,7 @@ numpy==1.26.1 overrides==7.4.0 packaging==23.2 palettable==3.3.3 -pamqp==2.3.0 +pamqp==3.3.0 pandas==2.1.1 pandocfilters==1.5.0 paramiko==2.12.0 @@ -119,7 +119,7 @@ pillow==10.1.0 platformdirs==3.11.0 plotly==5.17.0 pluggy==1.3.0 -plumpy==0.21.10 +plumpy==0.22.3 prometheus-client==0.17.1 prompt-toolkit==3.0.39 psutil==5.9.6 diff --git a/requirements/requirements-py-3.9.txt b/requirements/requirements-py-3.9.txt index a576ca238d..1a7d1b2704 100644 --- a/requirements/requirements-py-3.9.txt +++ b/requirements/requirements-py-3.9.txt @@ -6,8 +6,8 @@ # accessible-pygments==0.0.5 aiida-export-migration-tests==0.9.0 -aio-pika==6.8.1 -aiormq==3.3.1 +aio-pika==9.4.0 +aiormq==6.8.0 alabaster==0.7.13 alembic==1.11.1 aniso8601==9.0.1 @@ -80,7 +80,7 @@ jupyter-server==2.6.0 jupyter-server-terminals==0.4.4 jupyterlab-pygments==0.2.2 jupyterlab-widgets==3.0.7 -kiwipy[rmq]==0.7.7 +kiwipy[rmq]==0.8.4 kiwisolver==1.4.4 latexcodec==2.0.1 mako==1.2.4 @@ -108,7 +108,7 @@ numpy==1.25.0 overrides==7.3.1 packaging==23.1 palettable==3.3.3 -pamqp==2.3.0 +pamqp==3.3.0 pandas==2.0.2 pandocfilters==1.5.0 paramiko==2.12.0 @@ -122,7 +122,7 @@ pillow==9.5.0 platformdirs==3.6.0 plotly==5.15.0 pluggy==1.0.0 -plumpy==0.21.8 +plumpy==0.22.3 prometheus-client==0.17.0 prompt-toolkit==3.0.38 psutil==5.9.5 diff --git a/src/aiida/brokers/rabbitmq/broker.py b/src/aiida/brokers/rabbitmq/broker.py index 5321f6d400..c4ecfa2400 100644 --- a/src/aiida/brokers/rabbitmq/broker.py +++ b/src/aiida/brokers/rabbitmq/broker.py @@ -122,4 +122,4 @@ def get_rabbitmq_version(self): """ from packaging.version import parse - return parse(self.get_communicator().server_properties['version'].decode('utf-8')) + return parse(self.get_communicator().server_properties['version']) diff --git a/src/aiida/engine/processes/process.py b/src/aiida/engine/processes/process.py index f4dc9f9d69..5eabfd56f7 100644 --- a/src/aiida/engine/processes/process.py +++ b/src/aiida/engine/processes/process.py @@ -39,9 +39,10 @@ import plumpy.futures import plumpy.persistence import plumpy.processes -from aio_pika.exceptions import ConnectionClosed from kiwipy.communications import UnroutableError from plumpy.process_states import Finished, ProcessState +from plumpy.processes import ConnectionClosed # type: ignore[attr-defined] 
+from plumpy.processes import Process as PlumpyProcess from plumpy.utils import AttributesFrozendict from aiida import orm @@ -66,7 +67,7 @@ @plumpy.persistence.auto_persist('_parent_pid', '_enable_persistence') -class Process(plumpy.processes.Process): +class Process(PlumpyProcess): """This class represents an AiiDA process which can be executed and will have full provenance saved in the database. """ diff --git a/tests/manage/test_manager.py b/tests/manage/test_manager.py new file mode 100644 index 0000000000..4359a2ab48 --- /dev/null +++ b/tests/manage/test_manager.py @@ -0,0 +1,33 @@ +"""Tests for :mod:`aiida.manage.manager`.""" + +import pytest +from aiida import engine, orm + + +@engine.calcfunction +def add_calcfunction(data): + return orm.Int(data.value + 1) + + +@pytest.mark.requires_rmq +def test_disconnect(): + """Test the communicator disconnect. + + When the dependency ``kiwipy`` was updated to v0.8, it introduced a problem with shutting down the communicator. + After at least one process would have been run, trying to disconnect the communicator would time out. The problem + is related to the update of the lower-lying libraries ``aio-pika`` and ``aiormq`` to v9.4 and v6.8, respectively. + Despite much painstaking debugging, neither the cause nor a solution could be determined. This test is added to + demonstrate the problematic behavior. Getting the communicator and then disconnecting it (through calling + :meth:`aiida.manage.manager.Manager.reset_profile`) works fine. However, if a process is run before closing it, + for example running a calcfunction, the closing of the communicator will raise a ``TimeoutError``. + """ + from aiida.manage import get_manager + + manager = get_manager() + manager.get_communicator() + manager.reset_profile() # This returns just fine + + result, node = add_calcfunction.run_get_node(1) + assert node.is_finished_ok + assert result == 2 + manager.reset_profile() # This hangs before timing out From e91371573a84d4a68d6107f33c392b8718f2f26f Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Thu, 13 Jul 2023 15:28:23 +0200 Subject: [PATCH 31/42] `Manager`: Catch `TimeoutError` when closing communicator The exception is caught and logged as a warning.
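In isolation, the pattern this patch applies looks as follows. This is a minimal sketch, not the actual AiiDA code: the generic `broker` argument and the `close_broker_safely` helper are stand-ins, assuming only an object whose `close()` may raise `concurrent.futures.TimeoutError`. The real change is in the diff below.

import logging
from concurrent import futures

logger = logging.getLogger(__name__)


def close_broker_safely(broker) -> None:
    """Close the broker, demoting a shutdown timeout to a logged warning."""
    if broker is None:
        return
    try:
        broker.close()
    except futures.TimeoutError as exception:
        # The timeout is considered benign during teardown, so it is logged
        # rather than re-raised, letting the shutdown proceed.
        logger.warning(f'Call to close the broker timed out: {exception}')

Logging instead of re-raising keeps profile teardown from failing on a known, apparently harmless timeout.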
--- src/aiida/manage/manager.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/aiida/manage/manager.py b/src/aiida/manage/manager.py index c84b39f903..8621b324f4 100644 --- a/src/aiida/manage/manager.py +++ b/src/aiida/manage/manager.py @@ -68,6 +68,8 @@ class Manager: def __init__(self) -> None: """Construct a new instance.""" + from aiida.common.log import AIIDA_LOGGER + # note: the config currently references the global variables self._broker: Optional['Broker'] = None self._profile: Optional['Profile'] = None @@ -76,6 +78,7 @@ def __init__(self) -> None: self._process_controller: Optional['RemoteProcessThreadController'] = None self._persister: Optional['AiiDAPersister'] = None self._runner: Optional['Runner'] = None + self.logger = AIIDA_LOGGER.getChild(__name__) @staticmethod def get_config(create=False) -> 'Config': @@ -165,8 +168,15 @@ def reset_profile_storage(self) -> None: def reset_broker(self) -> None: """Reset the communicator.""" + from concurrent import futures + if self._broker is not None: + try: + self._broker.close() + except futures.TimeoutError as exception: + self.logger.warning(f'Call to close the broker timed out: {exception}') self._broker.close() + self._broker = None self._process_controller = None From 89cd03c0d05881bb5bb7b36cf91a150d79922896 Mon Sep 17 00:00:00 2001 From: Kevin Lefrancois-Gagnon <138684774+kmlefran@users.noreply.github.com> Date: Wed, 3 Jul 2024 17:25:34 -0400 Subject: [PATCH 32/42] Docs: Fix typo in pytest plugins codeblock (#6513) --- docs/source/topics/plugins.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/topics/plugins.rst b/docs/source/topics/plugins.rst index bb371d6d63..6e7c44d1bc 100644 --- a/docs/source/topics/plugins.rst +++ b/docs/source/topics/plugins.rst @@ -344,7 +344,7 @@ To make use of these fixtures, create a ``conftest.py`` file in your ``tests`` f .. code-block:: python - pytest_plugins = 'aiida.tools.pytest_fixtures + pytest_plugins = 'aiida.tools.pytest_fixtures' Just by adding this line, the fixtures that are provided by the :mod:`~aiida.tools.pytest_fixtures` module are automatically imported. The module provides the following fixtures: From 11eefc9c5a8fdfd232d6845b462966bcdba969d8 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Thu, 4 Jul 2024 11:13:21 +0200 Subject: [PATCH 33/42] Docs: Add `PluggableSchemaValidator` to nitpick exceptions (#6515) This class comes from `pydantic` and as of `pydantic==2.8.2` this is causing a warning because Sphinx cannot find the cross-reference. --- docs/source/nitpick-exceptions | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/source/nitpick-exceptions b/docs/source/nitpick-exceptions index 3e6bd2b5bb..cdfa6151e3 100644 --- a/docs/source/nitpick-exceptions +++ b/docs/source/nitpick-exceptions @@ -142,6 +142,7 @@ py:meth fail py:class ComputedFieldInfo py:class pydantic.fields.Field py:class pydantic.main.BaseModel +py:class PluggableSchemaValidator py:class requests.models.Response py:class requests.Response From 6d2edc919e3340b67d8097c425a5e5f6971707f8 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Thu, 4 Jul 2024 15:25:13 +0200 Subject: [PATCH 34/42] CLI: Accept multiple node identifiers in `verdi node graph generate` (#6443) The `--identifier` option allows the user to specify which identifier to label nodes in the graph with: pk, uuid or label.
Here, the interface is updated to allow specifying multiple identifiers, e.g.: verdi node graph generate --identifier pk uuid -- If more than one identifier type is specified, the resulting identifiers for each node are joined using a `|` character. --- src/aiida/cmdline/commands/cmd_node.py | 9 +++-- src/aiida/tools/visualization/graph.py | 30 ++++++++------ tests/cmdline/commands/test_node.py | 2 +- tests/tools/visualization/test_graph.py | 39 +++++++++++++++++-- .../test_graph_node_identifiers_label_.txt | 9 +++++ ..._graph_node_identifiers_node_id_type3_.txt | 9 +++++ ..._graph_node_identifiers_node_id_type4_.txt | 9 +++++ .../test_graph_node_identifiers_pk_.txt | 9 +++++ .../test_graph_node_identifiers_uuid_.txt | 9 +++++ 9 files changed, 106 insertions(+), 19 deletions(-) create mode 100644 tests/tools/visualization/test_graph/test_graph_node_identifiers_label_.txt create mode 100644 tests/tools/visualization/test_graph/test_graph_node_identifiers_node_id_type3_.txt create mode 100644 tests/tools/visualization/test_graph/test_graph_node_identifiers_node_id_type4_.txt create mode 100644 tests/tools/visualization/test_graph/test_graph_node_identifiers_pk_.txt create mode 100644 tests/tools/visualization/test_graph/test_graph_node_identifiers_uuid_.txt diff --git a/src/aiida/cmdline/commands/cmd_node.py b/src/aiida/cmdline/commands/cmd_node.py index 8e6ae8fba0..79efcebcef 100644 --- a/src/aiida/cmdline/commands/cmd_node.py +++ b/src/aiida/cmdline/commands/cmd_node.py @@ -15,6 +15,7 @@ from aiida.cmdline.commands.cmd_verdi import verdi from aiida.cmdline.params import arguments, options +from aiida.cmdline.params.options.multivalue import MultipleValueOption from aiida.cmdline.params.types.plugin import PluginParamType from aiida.cmdline.utils import decorators, echo, echo_tabulate, multi_line_input from aiida.cmdline.utils.decorators import with_dbenv @@ -439,8 +440,10 @@ def verdi_graph(): ) @click.option( '--identifier', + 'identifiers', help='the type of identifier to use within the node text', - default='uuid', + default=('uuid',), + cls=MultipleValueOption, type=click.Choice(['pk', 'uuid', 'label']), ) @click.option( @@ -483,7 +486,7 @@ def verdi_graph(): def graph_generate( root_nodes, link_types, - identifier, + identifiers, ancestor_depth, descendant_depth, process_out, @@ -506,7 +509,7 @@ def graph_generate( output_file = pathlib.Path(f'{pks}.{engine}.{output_format}') echo.echo_info(f'Initiating graphviz engine: {engine}') - graph = Graph(engine=engine, node_id_type=identifier) + graph = Graph(engine=engine, node_id_type=identifiers) link_types = {'all': (), 'logic': ('input_work', 'return'), 'data': ('input_calc', 'create')}[link_types] for root_node in root_nodes: diff --git a/src/aiida/tools/visualization/graph.py b/src/aiida/tools/visualization/graph.py index 2fe7f4250e..91411796ea 100644 --- a/src/aiida/tools/visualization/graph.py +++ b/src/aiida/tools/visualization/graph.py @@ -29,6 +29,7 @@ __all__ = ('Graph', 'default_link_styles', 'default_node_styles', 'pstate_node_styles', 'default_node_sublabels') LinkAnnotateType = Literal[None, 'label', 'type', 'both'] +IdentifierType = Literal['pk', 'uuid', 'label'] class LinkStyleFunc(Protocol): @@ -254,18 +255,25 @@ def default_node_sublabels(node: orm.Node) -> str: return sublabel -def get_node_id_label(node: orm.Node, id_type: Literal['pk', 'uuid', 'label']) -> str: +NODE_IDENTIFIER_TO_LABEL = { + 'pk': lambda node: str(node.pk), + 'uuid': lambda node: node.uuid.split('-')[0], + 'label': lambda node: node.label, +} + + +def 
get_node_id_label(node: orm.Node, id_type: IdentifierType | list[IdentifierType]) -> str: """Return an identifier str for the node""" - if id_type == 'pk': - return str(node.pk) - if id_type == 'uuid': - return node.uuid.split('-')[0] - if id_type == 'label': - return node.label - raise ValueError(f'node_id_type not recognised: {id_type}') + + id_types = id_type if isinstance(id_type, (list, tuple)) else [id_type] + + try: + return '|'.join(NODE_IDENTIFIER_TO_LABEL[key](node) for key in id_types) + except KeyError as exception: + raise ValueError(f'`{id_type}` is not a valid `node_id_type`, choose from: pk, uuid, label') from exception -def _get_node_label(node: orm.Node, id_type: Literal['pk', 'uuid', 'label'] = 'pk') -> str: +def _get_node_label(node: orm.Node, id_type: IdentifierType | list[IdentifierType] = 'pk') -> str: """Return a label text of node and the return format is ' ()'.""" if isinstance(node, orm.Data): label = f'{node.__class__.__name__} ({get_node_id_label(node, id_type)})' @@ -287,7 +295,7 @@ def _add_graphviz_node( node_sublabel_func, style_override: None | dict = None, include_sublabels: bool = True, - id_type: Literal['pk', 'uuid', 'label'] = 'pk', + id_type: IdentifierType | list[IdentifierType] = 'pk', ): """Create a node in the graph @@ -360,7 +368,7 @@ def __init__( link_style_fn: LinkStyleFunc | None = None, node_style_fn: Callable[[orm.Node], dict] | None = None, node_sublabel_fn: Callable[[orm.Node], str] | None = None, - node_id_type: Literal['pk', 'uuid', 'label'] = 'pk', + node_id_type: IdentifierType | list[IdentifierType] = 'pk', backend: StorageBackend | None = None, ): """A class to create graphviz graphs of the AiiDA node provenance diff --git a/tests/cmdline/commands/test_node.py b/tests/cmdline/commands/test_node.py index d9c7774556..66ca83b686 100644 --- a/tests/cmdline/commands/test_node.py +++ b/tests/cmdline/commands/test_node.py @@ -392,7 +392,7 @@ def test_node_id_label_format(self, run_cli_command): filename = f'{root_node}.dot.pdf' for id_label_type in ['uuid', 'pk', 'label']: - options = ['--identifier', id_label_type, root_node] + options = ['--identifier', id_label_type, '--', root_node] try: run_cli_command(cmd_node.graph_generate, options) assert os.path.isfile(filename) diff --git a/tests/tools/visualization/test_graph.py b/tests/tools/visualization/test_graph.py index 17bda9993f..ba47b335b2 100644 --- a/tests/tools/visualization/test_graph.py +++ b/tests/tools/visualization/test_graph.py @@ -8,6 +8,8 @@ ########################################################################### """Tests for creating graphs (using graphviz)""" +import re + import pytest from aiida import orm from aiida.common import AttributeDict @@ -290,10 +292,6 @@ def test_graph_graphviz_source_pstate(self): graph = graph_mod.Graph(node_style_fn=graph_mod.pstate_node_styles) graph.recurse_descendants(nodes.pd0) - # print() - # print(graph.graphviz.source) - # graph.graphviz.render("test_graphviz_pstate", cleanup=True) - expected = """\ digraph {{ N{pd0} [label="Dict ({pd0})" color=red pencolor=black penwidth=6 shape=rectangle] @@ -325,3 +323,36 @@ def test_graph_graphviz_source_pstate(self): assert sorted([line.strip() for line in graph.graphviz.source.splitlines()]) == sorted( [line.strip() for line in expected.splitlines()] ) + + @pytest.mark.parametrize( + 'node_id_type', + ( + 'pk', + 'uuid', + 'label', + ('pk', 'uuid'), + ('pk', 'label'), + ), + ) + def test_graph_node_identifiers(self, node_id_type, monkeypatch, file_regression): + """.""" + nodes = 
self.create_provenance() + + # Monkeypatch the mapping of lambdas that convert return a node's identifier in string form. This is because + # the pks and uuids of the test nodes will change between each test run and this would fail the file regression. + node_identifier_to_label = { + 'pk': lambda node: '10', + 'uuid': lambda node: '16739459', + 'label': lambda node: 'some-label', + } + monkeypatch.setattr(graph_mod, 'NODE_IDENTIFIER_TO_LABEL', node_identifier_to_label) + + graph = graph_mod.Graph(node_id_type=node_id_type) + graph.recurse_descendants(nodes.calcf1) + + # The order of certain output lines can be randomly ordered so we split the file in lines, sort, and then join + # them into a single string again. The node identifiers generated by the engine are of the form ``N{pk}`` and + # they also clearly vary, so they are replaced with the ``NODE`` placeholder. + string = '\n'.join(sorted(graph.graphviz.source.strip().split('\n'))) + string = re.sub(r'N\d+', 'NODE', string) + file_regression.check(string) diff --git a/tests/tools/visualization/test_graph/test_graph_node_identifiers_label_.txt b/tests/tools/visualization/test_graph/test_graph_node_identifiers_label_.txt new file mode 100644 index 0000000000..2c23527560 --- /dev/null +++ b/tests/tools/visualization/test_graph/test_graph_node_identifiers_label_.txt @@ -0,0 +1,9 @@ + NODE -> NODE [color="#000000" style=solid] + NODE -> NODE [color="#000000" style=solid] + NODE [label="CalcFunctionNode (some-label) + NODE [label="Dict (some-label)" fillcolor="#8cd499ff" penwidth=0 shape=ellipse style=filled] + NODE [label="FolderData (some-label)" fillcolor="#8cd499ff" penwidth=0 shape=ellipse style=filled] +Exit Code: 200" color=red fillcolor="#de707f77" penwidth=6 shape=rectangle style=filled] +State: finished +digraph { +} \ No newline at end of file diff --git a/tests/tools/visualization/test_graph/test_graph_node_identifiers_node_id_type3_.txt b/tests/tools/visualization/test_graph/test_graph_node_identifiers_node_id_type3_.txt new file mode 100644 index 0000000000..7644ab1562 --- /dev/null +++ b/tests/tools/visualization/test_graph/test_graph_node_identifiers_node_id_type3_.txt @@ -0,0 +1,9 @@ + NODE -> NODE [color="#000000" style=solid] + NODE -> NODE [color="#000000" style=solid] + NODE [label="CalcFunctionNode (10|16739459) + NODE [label="Dict (10|16739459)" fillcolor="#8cd499ff" penwidth=0 shape=ellipse style=filled] + NODE [label="FolderData (10|16739459)" fillcolor="#8cd499ff" penwidth=0 shape=ellipse style=filled] +Exit Code: 200" color=red fillcolor="#de707f77" penwidth=6 shape=rectangle style=filled] +State: finished +digraph { +} \ No newline at end of file diff --git a/tests/tools/visualization/test_graph/test_graph_node_identifiers_node_id_type4_.txt b/tests/tools/visualization/test_graph/test_graph_node_identifiers_node_id_type4_.txt new file mode 100644 index 0000000000..87b15dcf5c --- /dev/null +++ b/tests/tools/visualization/test_graph/test_graph_node_identifiers_node_id_type4_.txt @@ -0,0 +1,9 @@ + NODE -> NODE [color="#000000" style=solid] + NODE -> NODE [color="#000000" style=solid] + NODE [label="CalcFunctionNode (10|some-label) + NODE [label="Dict (10|some-label)" fillcolor="#8cd499ff" penwidth=0 shape=ellipse style=filled] + NODE [label="FolderData (10|some-label)" fillcolor="#8cd499ff" penwidth=0 shape=ellipse style=filled] +Exit Code: 200" color=red fillcolor="#de707f77" penwidth=6 shape=rectangle style=filled] +State: finished +digraph { +} \ No newline at end of file diff --git 
a/tests/tools/visualization/test_graph/test_graph_node_identifiers_pk_.txt b/tests/tools/visualization/test_graph/test_graph_node_identifiers_pk_.txt new file mode 100644 index 0000000000..ec6b88d6c2 --- /dev/null +++ b/tests/tools/visualization/test_graph/test_graph_node_identifiers_pk_.txt @@ -0,0 +1,9 @@ + NODE -> NODE [color="#000000" style=solid] + NODE -> NODE [color="#000000" style=solid] + NODE [label="CalcFunctionNode (10) + NODE [label="Dict (10)" fillcolor="#8cd499ff" penwidth=0 shape=ellipse style=filled] + NODE [label="FolderData (10)" fillcolor="#8cd499ff" penwidth=0 shape=ellipse style=filled] +Exit Code: 200" color=red fillcolor="#de707f77" penwidth=6 shape=rectangle style=filled] +State: finished +digraph { +} \ No newline at end of file diff --git a/tests/tools/visualization/test_graph/test_graph_node_identifiers_uuid_.txt b/tests/tools/visualization/test_graph/test_graph_node_identifiers_uuid_.txt new file mode 100644 index 0000000000..b4ca47ff60 --- /dev/null +++ b/tests/tools/visualization/test_graph/test_graph_node_identifiers_uuid_.txt @@ -0,0 +1,9 @@ + NODE -> NODE [color="#000000" style=solid] + NODE -> NODE [color="#000000" style=solid] + NODE [label="CalcFunctionNode (16739459) + NODE [label="Dict (16739459)" fillcolor="#8cd499ff" penwidth=0 shape=ellipse style=filled] + NODE [label="FolderData (16739459)" fillcolor="#8cd499ff" penwidth=0 shape=ellipse style=filled] +Exit Code: 200" color=red fillcolor="#de707f77" penwidth=6 shape=rectangle style=filled] +State: finished +digraph { +} \ No newline at end of file From f9924437070c67a9505f2f3b70a2d6d303acd38a Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Fri, 5 Jul 2024 09:00:54 +0200 Subject: [PATCH 35/42] Engine: Fix bug in upload calculation for `PortableCode` with SSH (#6519) When a `CalcJob` would be run with a `PortableCode` using a computer configured with the `core.ssh` transport plugin, the upload task would except. The `aiida.engine.daemon.execmanager.upload_calculation` method is passing `pathlib.Path` objects to the transport interface which is not supported. By chance this does not raise an exception when using the `LocalTransport`, but the `SshTransport` passes these values to the paramiko library which does choke on anything but strings. The use of a `PortableCode` was tested for in the unit test `tests/engine/processes/calcjobs/test_calc_job.py:test_portable_code` but this would only use a local transport and thus the bug would not appear. Parametrizing it to also use the `SshTransport` wouldn't help since the test uses `metadata.dry_run = True`, whose implementation will always swap the transport to a local one, still avoiding the bugged code pathway. Instead a test is added that directly calls `upload_calculation` which parametrizes over all installed transport plugins and uses a `PortableCode`. This confirmed the bug. The `upload_calculation` method is updated to ensure casting all `pathlib.Path` objects to `str` before passing it to the transport. 
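As a rough sketch of the fix (illustrative only, not the patch itself; the `transport` and `root` arguments and the `make_remote_dirs` helper are stand-ins), every path is cast to `str` before it reaches the transport:

import pathlib


def make_remote_dirs(transport, root: pathlib.Path, dirnames: list[str]) -> None:
    """Remotely create `root` and its subdirectories, passing only string paths."""
    # A `pathlib.Path` happens to work with the local transport, but the SSH
    # transport forwards the value to paramiko, which accepts only strings.
    transport.makedirs(str(root), ignore_existing=True)
    for dirname in dirnames:
        transport.makedirs(str(root / dirname), ignore_existing=True)

The same `str()` casts appear in the actual hunk below.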
--- src/aiida/engine/daemon/execmanager.py | 8 +++---- tests/engine/daemon/test_execmanager.py | 31 ++++++++++++++++++++++++- 2 files changed, 34 insertions(+), 5 deletions(-) diff --git a/src/aiida/engine/daemon/execmanager.py b/src/aiida/engine/daemon/execmanager.py index f7517a0580..6f2a42fa15 100644 --- a/src/aiida/engine/daemon/execmanager.py +++ b/src/aiida/engine/daemon/execmanager.py @@ -178,11 +178,11 @@ def upload_calculation( # Note: this will possibly overwrite files for root, dirnames, filenames in code.base.repository.walk(): # mkdir of root - transport.makedirs(root, ignore_existing=True) + transport.makedirs(str(root), ignore_existing=True) # remotely mkdir first for dirname in dirnames: - transport.makedirs((root / dirname), ignore_existing=True) + transport.makedirs(str(root / dirname), ignore_existing=True) # Note, once #2579 is implemented, use the `node.open` method instead of the named temporary file in # combination with the new `Transport.put_object_from_filelike` @@ -192,8 +192,8 @@ def upload_calculation( content = code.base.repository.get_object_content((pathlib.Path(root) / filename), mode='rb') handle.write(content) handle.flush() - transport.put(handle.name, (root / filename)) - transport.chmod(code.filepath_executable, 0o755) # rwxr-xr-x + transport.put(handle.name, str(root / filename)) + transport.chmod(str(code.filepath_executable), 0o755) # rwxr-xr-x # local_copy_list is a list of tuples, each with (uuid, dest_path, rel_path) # NOTE: validation of these lists are done inside calculation.presubmit() diff --git a/tests/engine/daemon/test_execmanager.py b/tests/engine/daemon/test_execmanager.py index d5fc8fdbcc..bb4209659d 100644 --- a/tests/engine/daemon/test_execmanager.py +++ b/tests/engine/daemon/test_execmanager.py @@ -15,7 +15,7 @@ from aiida.common.datastructures import CalcInfo, CodeInfo, FileCopyOperation from aiida.common.folders import SandboxFolder from aiida.engine.daemon import execmanager -from aiida.orm import CalcJobNode, FolderData, RemoteData, SinglefileData +from aiida.orm import CalcJobNode, FolderData, PortableCode, RemoteData, SinglefileData from aiida.plugins import entry_point from aiida.transports.plugins.local import LocalTransport @@ -585,3 +585,32 @@ def test_upload_combinations( filepath_workdir = pathlib.Path(node.get_remote_workdir()) assert serialize_file_hierarchy(filepath_workdir, read_bytes=False) == expected_hierarchy + + +def test_upload_calculation_portable_code(fixture_sandbox, node_and_calc_info, tmp_path): + """Test ``upload_calculation`` with a ``PortableCode`` for different transports. + + Regression test for https://github.com/aiidateam/aiida-core/issues/6518 + """ + subdir = tmp_path / 'sub' + subdir.mkdir() + (subdir / 'some-file').write_bytes(b'sub dummy') + (tmp_path / 'bash').write_bytes(b'bash implementation') + + code = PortableCode( + filepath_executable='bash', + filepath_files=tmp_path, + ).store() + + node, calc_info = node_and_calc_info + code_info = CodeInfo() + code_info.code_uuid = code.uuid + calc_info.codes_info = [code_info] + + with node.computer.get_transport() as transport: + execmanager.upload_calculation( + node, + transport, + calc_info, + fixture_sandbox, + ) From 6196dcd3b321758ae8dfb84b22a59e1c77d8e933 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Fri, 5 Jul 2024 09:49:20 +0200 Subject: [PATCH 36/42] `SqliteDosStorage`: Make the migrator compatible with SQLite (#6429) The majority of the `SqliteDosStorage` piggy-backs off of the `PsqlDosStorage` plugin. 
It also uses the `PsqlDosMigrator` as-is to perform the database migrations. This is not safe, however, as PostgreSQL and SQLite do not have exactly the same syntax. An example is the `main_0002` revision, which was added to drop the hashes of certain nodes. This uses the `#-` operator, which is JSONB-specific PostgreSQL syntax that is not supported by SQLite. Since this migration predates the `SqliteDosStorage` plugin, it has never caused a problem: all profiles would be new and would not have any nodes, so the SQL code of the migration would never actually be executed. In preparation for any future migrations that may need to be added, the `SqliteDosStorage` now uses the `SqliteDosMigrator`. This subclasses the `PsqlDosMigrator`, as it can still use most of its functionality, but it changes a few critical things. Most notably, the schema versions are now kept separately and are no longer borrowed from the `core.psql_dos` plugin. The initial version `main_0001_initial.py` is taken from the migration `main_0000_initial.py` of the `core.sqlite_zip` storage plugin. The only difference is that UUID fields are declared as `String(32)` instead of `CHAR(32)`. The SQLAlchemy models that are automatically generated for SQLite from the PostgreSQL-based models actually use the latter type. See `aiida.storage.sqlite_zip.models:pg_to_sqlite`. --- src/aiida/storage/migrations.py | 8 + src/aiida/storage/psql_dos/migrator.py | 8 +- src/aiida/storage/sqlite_dos/backend.py | 123 +++++++- .../storage/sqlite_dos/migrations/env.py | 54 ++++ .../migrations/versions/main_0001_initial.py | 198 +++++++++++++ .../main_0002_recompute_hash_calc_job_node.py | 84 ++++++ .../storage/sqlite_zip/migrations/env.py | 2 +- tests/cmdline/commands/test_status.py | 2 + .../storage/sqlite_dos/migrations/conftest.py | 76 +++++ .../sqlite_dos/migrations/test_all_schema.py | 49 ++++ .../test_head_vs_orm_main_0002_.yml | 269 ++++++++++++++++++ .../test_all_schema/test_main_main_0001_.yml | 255 +++++++++++++++++ .../test_all_schema/test_main_main_0002_.yml | 255 +++++++++++++++++ 13 files changed, 1360 insertions(+), 23 deletions(-) create mode 100644 src/aiida/storage/migrations.py create mode 100644 src/aiida/storage/sqlite_dos/migrations/env.py create mode 100644 src/aiida/storage/sqlite_dos/migrations/versions/main_0001_initial.py create mode 100644 src/aiida/storage/sqlite_dos/migrations/versions/main_0002_recompute_hash_calc_job_node.py create mode 100644 tests/storage/sqlite_dos/migrations/conftest.py create mode 100644 tests/storage/sqlite_dos/migrations/test_all_schema.py create mode 100644 tests/storage/sqlite_dos/migrations/test_all_schema/test_head_vs_orm_main_0002_.yml create mode 100644 tests/storage/sqlite_dos/migrations/test_all_schema/test_main_main_0001_.yml create mode 100644 tests/storage/sqlite_dos/migrations/test_all_schema/test_main_main_0002_.yml diff --git a/src/aiida/storage/migrations.py b/src/aiida/storage/migrations.py new file mode 100644 index 0000000000..c37cbab641 --- /dev/null +++ b/src/aiida/storage/migrations.py @@ -0,0 +1,8 @@ +"""Module with common resources related to storage migrations.""" + +TEMPLATE_INVALID_SCHEMA_VERSION = """ +Database schema version `{schema_version_database}` is incompatible with the required schema version `{schema_version_code}`.
+To migrate the database schema version to the current one, run the following command: + + verdi -p {profile_name} storage migrate +""" # noqa: E501 diff --git a/src/aiida/storage/psql_dos/migrator.py b/src/aiida/storage/psql_dos/migrator.py index 3ea36b9307..5251fd49de 100644 --- a/src/aiida/storage/psql_dos/migrator.py +++ b/src/aiida/storage/psql_dos/migrator.py @@ -33,6 +33,7 @@ from aiida.common import exceptions from aiida.manage.configuration.profile import Profile from aiida.storage.log import MIGRATE_LOGGER +from aiida.storage.migrations import TEMPLATE_INVALID_SCHEMA_VERSION from aiida.storage.psql_dos.models.settings import DbSetting from aiida.storage.psql_dos.utils import create_sqlalchemy_engine @@ -46,13 +47,6 @@ verdi -p {profile_name} storage migrate """ -TEMPLATE_INVALID_SCHEMA_VERSION = """ -Database schema version `{schema_version_database}` is incompatible with the required schema version `{schema_version_code}`. -To migrate the database schema version to the current one, run the following command: - - verdi -p {profile_name} storage migrate -""" # noqa: E501 - ALEMBIC_REL_PATH = 'migrations' REPOSITORY_UUID_KEY = 'repository|uuid' diff --git a/src/aiida/storage/sqlite_dos/backend.py b/src/aiida/storage/sqlite_dos/backend.py index 3b13764b3d..7be70f4a1c 100644 --- a/src/aiida/storage/sqlite_dos/backend.py +++ b/src/aiida/storage/sqlite_dos/backend.py @@ -10,31 +10,36 @@ from __future__ import annotations +import pathlib from functools import cached_property, lru_cache from pathlib import Path from shutil import rmtree from typing import TYPE_CHECKING, Optional from uuid import uuid4 +from alembic.config import Config from disk_objectstore import Container, backup_utils from pydantic import BaseModel, Field, field_validator -from sqlalchemy import insert +from sqlalchemy import insert, inspect, select from sqlalchemy.orm import scoped_session, sessionmaker from aiida.common import exceptions from aiida.common.log import AIIDA_LOGGER -from aiida.manage import Profile +from aiida.manage.configuration.profile import Profile from aiida.manage.configuration.settings import AIIDA_CONFIG_FOLDER from aiida.orm.implementation import BackendEntity +from aiida.storage.log import MIGRATE_LOGGER from aiida.storage.psql_dos.models.settings import DbSetting from aiida.storage.sqlite_zip import models, orm -from aiida.storage.sqlite_zip.migrator import get_schema_version_head from aiida.storage.sqlite_zip.utils import create_sqla_engine +from ..migrations import TEMPLATE_INVALID_SCHEMA_VERSION from ..psql_dos import PsqlDosBackend -from ..psql_dos.migrator import REPOSITORY_UUID_KEY, PsqlDosMigrator +from ..psql_dos.migrator import PsqlDosMigrator if TYPE_CHECKING: + from disk_objectstore import Container + from aiida.orm.entities import EntityTypes from aiida.repository.backend import DiskObjectStoreRepositoryBackend @@ -45,15 +50,26 @@ FILENAME_CONTAINER = 'container' +ALEMBIC_REL_PATH = 'migrations' + +REPOSITORY_UUID_KEY = 'repository|uuid' + + class SqliteDosMigrator(PsqlDosMigrator): - """Storage implementation using Sqlite database and disk-objectstore container. + """Class for validating and migrating `sqlite_dos` storage instances. - This storage backend is not recommended for use in production. The sqlite database is not the most performant and it - does not support all the ``QueryBuilder`` functionality that is supported by the ``core.psql_dos`` storage backend. 
- This storage is ideally suited for use cases that want to test or demo AiiDA as it requires no server but just a - folder on the local filesystem. + .. important:: This class should only be accessed via the storage backend class (apart from test purposes) + + The class subclasses the ``PsqlDosMigrator``. It essentially changes two things in the implementation: + + * Changes the path to the migration version files. This allows custom migrations to be written for SQLite-based + storage plugins, which is necessary since the PSQL-based migrations may use syntax that is not compatible. + * The logic for validating the storage is significantly simplified since the SQLite-based storage plugins do not + have to take legacy Django-based implementations into account. """ + alembic_version_tbl_name = 'alembic_version' + def __init__(self, profile: Profile) -> None: filepath_database = Path(profile.storage_config['filepath']) / FILENAME_DATABASE filepath_database.touch() @@ -91,6 +107,86 @@ def initialise_database(self) -> None: context.stamp(context.script, 'main@head') # type: ignore[arg-type] self.connection.commit() + def get_schema_version_profile(self) -> Optional[str]: # type: ignore[override] + """Return the schema version of the backend instance for this profile. + + Note, the version will be None if the database is empty or is a legacy django database. + """ + with self._migration_context() as context: + return context.get_current_revision() + + @staticmethod + def _alembic_config(): + """Return an instance of an Alembic `Config`.""" + dirpath = pathlib.Path(__file__).resolve().parent + config = Config() + config.set_main_option('script_location', str(dirpath / ALEMBIC_REL_PATH)) + return config + + def validate_storage(self) -> None: + """Validate that the storage for this profile + + 1. That the database schema is at the head version, i.e. is compatible with the code API. + 2. That the repository ID is equal to the UUID set in the database + + :raises: :class:`aiida.common.exceptions.UnreachableStorage` if the storage cannot be connected to + :raises: :class:`aiida.common.exceptions.IncompatibleStorageSchema` + if the storage is not compatible with the code API. + :raises: :class:`aiida.common.exceptions.CorruptStorage` + if the repository ID is not equal to the UUID set in the database. + """ + # check there is an alembic_version table from which to get the schema version + if not inspect(self.connection).has_table(self.alembic_version_tbl_name): + raise exceptions.IncompatibleStorageSchema('The database has no known version.') + + # now we can check that the alembic version is the latest + schema_version_code = self.get_schema_version_head() + schema_version_database = self.get_schema_version_profile() + if schema_version_database != schema_version_code: + raise exceptions.IncompatibleStorageSchema( + TEMPLATE_INVALID_SCHEMA_VERSION.format( + schema_version_database=schema_version_database, + schema_version_code=schema_version_code, + profile_name=self.profile.name, + ) + ) + + # finally, we check that the ID set within the disk-objectstore is equal to the one saved in the database, + # i.e.
this container is indeed the one associated with the db + repository_uuid = self.get_repository_uuid() + stmt = select(DbSetting.val).where(DbSetting.key == REPOSITORY_UUID_KEY) + database_repository_uuid = self.connection.execute(stmt).scalar_one_or_none() + if database_repository_uuid is None: + raise exceptions.CorruptStorage('The database has no repository UUID set.') + if database_repository_uuid != repository_uuid: + raise exceptions.CorruptStorage( + f'The database has a repository UUID configured to {database_repository_uuid} ' + f"but the disk-objectstore's is {repository_uuid}." + ) + + @property + def is_database_initialised(self) -> bool: + """Return whether the database is initialised. + + This is the case if it contains the table that holds the schema version for alembic. + + :returns: ``True`` if the database is initialised, ``False`` otherwise. + """ + return inspect(self.connection).has_table(self.alembic_version_tbl_name) + + def migrate(self) -> None: + """Migrate the storage for this profile to the head version. + + :raises: :class:`~aiida.common.exceptions.UnreachableStorage` if the storage cannot be accessed. + :raises: :class:`~aiida.common.exceptions.StorageMigrationError` if the storage is not initialised. + """ + if not inspect(self.connection).has_table(self.alembic_version_tbl_name): + raise exceptions.StorageMigrationError('storage is uninitialised, cannot migrate.') + + MIGRATE_LOGGER.report('Migrating to the head of the main branch') + self.migrate_up('main@head') + self.connection.commit() + class SqliteDosStorage(PsqlDosBackend): """A lightweight storage that is easy to install. @@ -178,12 +274,9 @@ def get_repository(self) -> 'DiskObjectStoreRepositoryBackend': return DiskObjectStoreRepositoryBackend(container=self.get_container()) @classmethod - def version_head(cls) -> str: - return get_schema_version_head() - - @classmethod - def version_profile(cls, profile: Profile) -> str | None: - return get_schema_version_head() + def version_profile(cls, profile: Profile) -> Optional[str]: + with cls.migrator_context(profile) as migrator: + return migrator.get_schema_version_profile() def query(self) -> orm.SqliteQueryBuilder: return orm.SqliteQueryBuilder(self) diff --git a/src/aiida/storage/sqlite_dos/migrations/env.py b/src/aiida/storage/sqlite_dos/migrations/env.py new file mode 100644 index 0000000000..e2beb1ad9f --- /dev/null +++ b/src/aiida/storage/sqlite_dos/migrations/env.py @@ -0,0 +1,54 @@ +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. # +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +"""Environment configuration to be used by alembic to perform database migrations.""" + +from alembic import context + + +def run_migrations_online(): + """Run migrations in 'online' mode. + + The connection should have been passed to the config, which we use to configure the migration context. 
+ """ + from aiida.storage.sqlite_zip.models import SqliteBase + + config = context.config + + connection = config.attributes.get('connection', None) + aiida_profile = config.attributes.get('aiida_profile', None) + on_version_apply = config.attributes.get('on_version_apply', None) + + if connection is None: + from aiida.common.exceptions import ConfigurationError + + raise ConfigurationError('An initialized connection is expected for the AiiDA online migrations.') + if aiida_profile is None: + from aiida.common.exceptions import ConfigurationError + + raise ConfigurationError('An aiida_profile is expected for the AiiDA online migrations.') + + context.configure( + connection=connection, + target_metadata=SqliteBase.metadata, + transaction_per_migration=True, + aiida_profile=aiida_profile, + on_version_apply=on_version_apply, + ) + + context.run_migrations() + + +try: + if context.is_offline_mode(): + raise NotImplementedError('This feature is not currently supported.') + + run_migrations_online() +except NameError: + # This will occur in an environment that is just compiling the documentation + pass diff --git a/src/aiida/storage/sqlite_dos/migrations/versions/main_0001_initial.py b/src/aiida/storage/sqlite_dos/migrations/versions/main_0001_initial.py new file mode 100644 index 0000000000..6af0887766 --- /dev/null +++ b/src/aiida/storage/sqlite_dos/migrations/versions/main_0001_initial.py @@ -0,0 +1,198 @@ +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. # +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +"""Initial main branch schema + +This schema is mainly equivalent to the `main_0000_initial.py` schema of the `sqlite_zip` backend. Except that UUID +columns use ``String(32)`` instead of ``CHAR(32)``. 
+ +Revision ID: main_0001 +Revises: +Create Date: 2024-05-29 +""" + +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects.sqlite import JSON + +revision = 'main_0001' +down_revision = None +branch_labels = ('main',) +depends_on = None + + +def upgrade(): + """Migrations for the upgrade.""" + op.create_table( + 'db_dbcomputer', + sa.Column('id', sa.Integer(), nullable=False, primary_key=True), + sa.Column('uuid', sa.String(32), nullable=False, unique=True), + sa.Column('label', sa.String(length=255), nullable=False, unique=True), + sa.Column('hostname', sa.String(length=255), nullable=False), + sa.Column('description', sa.Text(), nullable=False), + sa.Column('scheduler_type', sa.String(length=255), nullable=False), + sa.Column('transport_type', sa.String(length=255), nullable=False), + sa.Column('metadata', JSON(), nullable=False), + ) + op.create_table( + 'db_dbuser', + sa.Column('id', sa.Integer(), nullable=False, primary_key=True), + sa.Column('email', sa.String(length=254), nullable=False, unique=True), + sa.Column('first_name', sa.String(length=254), nullable=False), + sa.Column('last_name', sa.String(length=254), nullable=False), + sa.Column('institution', sa.String(length=254), nullable=False), + ) + op.create_table( + 'db_dbauthinfo', + sa.Column('id', sa.Integer(), nullable=False, primary_key=True), + sa.Column('aiidauser_id', sa.Integer(), nullable=False, index=True), + sa.Column('dbcomputer_id', sa.Integer(), nullable=False, index=True), + sa.Column('metadata', JSON(), nullable=False), + sa.Column('auth_params', JSON(), nullable=False), + sa.Column('enabled', sa.Boolean(), nullable=False), + sa.ForeignKeyConstraint( + ['aiidauser_id'], + ['db_dbuser.id'], + ondelete='CASCADE', + initially='DEFERRED', + deferrable=True, + ), + sa.ForeignKeyConstraint( + ['dbcomputer_id'], + ['db_dbcomputer.id'], + ondelete='CASCADE', + initially='DEFERRED', + deferrable=True, + ), + sa.UniqueConstraint('aiidauser_id', 'dbcomputer_id'), + ) + op.create_table( + 'db_dbgroup', + sa.Column('id', sa.Integer(), nullable=False, primary_key=True), + sa.Column('uuid', sa.String(32), nullable=False, unique=True), + sa.Column('label', sa.String(length=255), nullable=False, index=True), + sa.Column('type_string', sa.String(length=255), nullable=False, index=True), + sa.Column('time', sa.DateTime(timezone=True), nullable=False), + sa.Column('description', sa.Text(), nullable=False), + sa.Column('extras', JSON(), nullable=False), + sa.Column('user_id', sa.Integer(), nullable=False, index=True), + sa.ForeignKeyConstraint( + ['user_id'], + ['db_dbuser.id'], + ondelete='CASCADE', + initially='DEFERRED', + deferrable=True, + ), + sa.UniqueConstraint('label', 'type_string'), + ) + + op.create_table( + 'db_dbnode', + sa.Column('id', sa.Integer(), nullable=False, primary_key=True), + sa.Column('uuid', sa.String(32), nullable=False, unique=True), + sa.Column('node_type', sa.String(length=255), nullable=False, index=True), + sa.Column('process_type', sa.String(length=255), nullable=True, index=True), + sa.Column('label', sa.String(length=255), nullable=False, index=True), + sa.Column('description', sa.Text(), nullable=False), + sa.Column('ctime', sa.DateTime(timezone=True), nullable=False, index=True), + sa.Column('mtime', sa.DateTime(timezone=True), nullable=False, index=True), + sa.Column('attributes', JSON(), nullable=True), + sa.Column('extras', JSON(), nullable=True), + sa.Column('repository_metadata', JSON(), nullable=False), + sa.Column('dbcomputer_id', sa.Integer(), nullable=True, 
index=True), + sa.Column('user_id', sa.Integer(), nullable=False, index=True), + sa.ForeignKeyConstraint( + ['dbcomputer_id'], + ['db_dbcomputer.id'], + ondelete='RESTRICT', + initially='DEFERRED', + deferrable=True, + ), + sa.ForeignKeyConstraint( + ['user_id'], + ['db_dbuser.id'], + ondelete='restrict', + initially='DEFERRED', + deferrable=True, + ), + ) + + op.create_table( + 'db_dbcomment', + sa.Column('id', sa.Integer(), nullable=False, primary_key=True), + sa.Column('uuid', sa.String(32), nullable=False, unique=True), + sa.Column('dbnode_id', sa.Integer(), nullable=False, index=True), + sa.Column('ctime', sa.DateTime(timezone=True), nullable=False), + sa.Column('mtime', sa.DateTime(timezone=True), nullable=False), + sa.Column('user_id', sa.Integer(), nullable=False, index=True), + sa.Column('content', sa.Text(), nullable=False), + sa.ForeignKeyConstraint( + ['dbnode_id'], + ['db_dbnode.id'], + ondelete='CASCADE', + initially='DEFERRED', + deferrable=True, + ), + sa.ForeignKeyConstraint( + ['user_id'], + ['db_dbuser.id'], + ondelete='CASCADE', + initially='DEFERRED', + deferrable=True, + ), + ) + + op.create_table( + 'db_dbgroup_dbnodes', + sa.Column('id', sa.Integer(), nullable=False, primary_key=True), + sa.Column('dbnode_id', sa.Integer(), nullable=False, index=True), + sa.Column('dbgroup_id', sa.Integer(), nullable=False, index=True), + sa.ForeignKeyConstraint(['dbgroup_id'], ['db_dbgroup.id'], initially='DEFERRED', deferrable=True), + sa.ForeignKeyConstraint(['dbnode_id'], ['db_dbnode.id'], initially='DEFERRED', deferrable=True), + sa.UniqueConstraint('dbgroup_id', 'dbnode_id'), + ) + op.create_table( + 'db_dblink', + sa.Column('id', sa.Integer(), nullable=False, primary_key=True), + sa.Column('input_id', sa.Integer(), nullable=False, index=True), + sa.Column('output_id', sa.Integer(), nullable=False, index=True), + sa.Column('label', sa.String(length=255), nullable=False, index=True), + sa.Column('type', sa.String(length=255), nullable=False, index=True), + sa.ForeignKeyConstraint(['input_id'], ['db_dbnode.id'], initially='DEFERRED', deferrable=True), + sa.ForeignKeyConstraint( + ['output_id'], + ['db_dbnode.id'], + ondelete='CASCADE', + initially='DEFERRED', + deferrable=True, + ), + ) + + op.create_table( + 'db_dblog', + sa.Column('id', sa.Integer(), nullable=False, primary_key=True), + sa.Column('uuid', sa.String(32), nullable=False, unique=True), + sa.Column('time', sa.DateTime(timezone=True), nullable=False), + sa.Column('loggername', sa.String(length=255), nullable=False, index=True), + sa.Column('levelname', sa.String(length=50), nullable=False, index=True), + sa.Column('dbnode_id', sa.Integer(), nullable=False, index=True), + sa.Column('message', sa.Text(), nullable=False), + sa.Column('metadata', JSON(), nullable=False), + sa.ForeignKeyConstraint( + ['dbnode_id'], + ['db_dbnode.id'], + ondelete='CASCADE', + initially='DEFERRED', + deferrable=True, + ), + ) + + +def downgrade(): + """Migrations for the downgrade.""" + raise NotImplementedError('Downgrade of main_0000.') diff --git a/src/aiida/storage/sqlite_dos/migrations/versions/main_0002_recompute_hash_calc_job_node.py b/src/aiida/storage/sqlite_dos/migrations/versions/main_0002_recompute_hash_calc_job_node.py new file mode 100644 index 0000000000..ae70c45c4c --- /dev/null +++ b/src/aiida/storage/sqlite_dos/migrations/versions/main_0002_recompute_hash_calc_job_node.py @@ -0,0 +1,84 @@ +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. 
# +# This file is part of the AiiDA code. # +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +"""Drop the hashes for all ``CalcJobNode`` instances. + +The computed hash erroneously included the hash of the file repository. This was present as of v2.0 and so all nodes +created with versions since then will have incorrect hashes. + +Revision ID: main_0002 +Revises: main_0001 +Create Date: 2024-05-29 +""" + +from __future__ import annotations + +from aiida.common.log import AIIDA_LOGGER +from alembic import op + +LOGGER = AIIDA_LOGGER.getChild(__file__) + +revision = 'main_0002' +down_revision = 'main_0001' +branch_labels = None +depends_on = None + + +def drop_hashes(conn, hash_extra_key: str, entry_point_string: str | None = None) -> None: + """Drop hashes of nodes. + + Print warning only if the DB actually contains nodes. + + :param hash_extra_key: The key in the extras used to store the hash at the time of this migration. + :param entry_point_string: Optional entry point string of a node type to narrow the subset of nodes to reset. The + value should be a complete entry point string, e.g., ``aiida.node:process.calculation.calcjob`` to drop the hash + of all ``CalcJobNode`` rows. + """ + from aiida.orm.utils.node import get_type_string_from_class + from aiida.plugins import load_entry_point_from_string + from sqlalchemy.sql import text + + if entry_point_string is not None: + entry_point = load_entry_point_from_string(entry_point_string) + node_type = get_type_string_from_class(entry_point.__module__, entry_point.__name__) + else: + node_type = None + + if node_type: + statement_count = text(f"SELECT count(*) FROM db_dbnode WHERE node_type = '{node_type}';") + statement_update = text( + f"UPDATE db_dbnode SET extras = json_remove(db_dbnode.extras, '$.{hash_extra_key}') WHERE node_type = '{node_type}';" # noqa: E501 + ) + else: + statement_count = text('SELECT count(*) FROM db_dbnode;') + statement_update = text(f"UPDATE db_dbnode SET extras = json_remove(db_dbnode.extras, '$.{hash_extra_key}');") + + node_count = conn.execute(statement_count).fetchall()[0][0] + + if node_count > 0: + if entry_point_string: + msg = f'Invalidating the hashes of certain nodes. Please run `verdi node rehash -e {entry_point_string}`.' + else: + msg = 'Invalidating the hashes of all nodes. Please run `verdi node rehash`.' 
+ LOGGER.warning(msg) + + conn.execute(statement_update) + + +def upgrade(): + """Migrations for the upgrade.""" + drop_hashes( + op.get_bind(), hash_extra_key='_aiida_hash', entry_point_string='aiida.node:process.calculation.calcjob' + ) + + +def downgrade(): + """Migrations for the downgrade.""" + drop_hashes( + op.get_bind(), hash_extra_key='_aiida_hash', entry_point_string='aiida.node:process.calculation.calcjob' + ) diff --git a/src/aiida/storage/sqlite_zip/migrations/env.py b/src/aiida/storage/sqlite_zip/migrations/env.py index 73abbd917b..5691a95568 100644 --- a/src/aiida/storage/sqlite_zip/migrations/env.py +++ b/src/aiida/storage/sqlite_zip/migrations/env.py @@ -6,7 +6,7 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### -"""Upper level SQLAlchemy migration funcitons.""" +"""Upper level SQLAlchemy migration functions.""" from alembic import context diff --git a/tests/cmdline/commands/test_status.py b/tests/cmdline/commands/test_status.py index d02aff07d2..a4b81dbfc6 100644 --- a/tests/cmdline/commands/test_status.py +++ b/tests/cmdline/commands/test_status.py @@ -68,6 +68,7 @@ def test_storage_unable_to_connect(run_cli_command): profile._attributes['storage']['config']['database_port'] = old_port +@pytest.mark.requires_psql def test_storage_incompatible(run_cli_command, monkeypatch): """Test `verdi status` when storage schema version is incompatible with that of the code.""" @@ -83,6 +84,7 @@ def storage_cls(*args, **kwargs): assert result.exit_code is ExitCode.CRITICAL +@pytest.mark.requires_psql def test_storage_corrupted(run_cli_command, monkeypatch): """Test `verdi status` when the storage is found to be corrupt (e.g. non-matching repository UUIDs).""" diff --git a/tests/storage/sqlite_dos/migrations/conftest.py b/tests/storage/sqlite_dos/migrations/conftest.py new file mode 100644 index 0000000000..bba974705f --- /dev/null +++ b/tests/storage/sqlite_dos/migrations/conftest.py @@ -0,0 +1,76 @@ +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. 
# +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +"""Tests for the migration engine (Alembic) as well as for the AiiDA migrations for SQLAlchemy.""" + +import collections +import pathlib + +import pytest +from aiida.manage.configuration import Profile +from aiida.storage.sqlite_zip.utils import create_sqla_engine +from sqlalchemy import text + + +@pytest.fixture +def uninitialised_profile(tmp_path): + """Create a profile attached to an empty database and repository folder.""" + + yield Profile( + 'test_migrate', + { + 'test_profile': True, + 'storage': { + 'backend': 'core.sqlite_dos', + 'config': { + 'filepath': str(tmp_path), + }, + }, + 'process_control': {'backend': 'null', 'config': {}}, + }, + ) + + +def _generate_schema(profile: Profile) -> dict: + """Create a dict containing indexes of AiiDA tables.""" + with create_sqla_engine(pathlib.Path(profile.storage_config['filepath']) / 'database.sqlite').connect() as conn: + data = collections.defaultdict(list) + for type_, name, tbl_name, rootpage, sql in conn.execute(text('SELECT * FROM sqlite_master;')): + lines_sql = sql.strip().split('\n') if sql else [] + + # For an unknown reason, the ``sql`` is not deterministic as the order of the ``CONSTRAINTS`` rules seem to + # be in random order. To make sure they are always in the same order, they have to be ordered manually. + if type_ == 'table': + lines_constraints = [] + lines_other = [] + for line in lines_sql: + stripped = line.strip().strip(',') + + if 'CONSTRAINT' in stripped: + lines_constraints.append(stripped) + else: + lines_other.append(stripped) + + lines_sql = lines_other + sorted(lines_constraints) + data[type_].append((name, tbl_name, lines_sql)) + + for key in data.keys(): + data[key] = sorted(data[key], key=lambda v: v[0]) + + return dict(data) + + +@pytest.fixture +def reflect_schema(): + """A fixture to generate the schema of AiiDA tables for a given profile.""" + + def factory(profile: Profile) -> dict: + """Create a dict containing all tables and fields of AiiDA tables.""" + return _generate_schema(profile) + + return factory diff --git a/tests/storage/sqlite_dos/migrations/test_all_schema.py b/tests/storage/sqlite_dos/migrations/test_all_schema.py new file mode 100644 index 0000000000..51351f918e --- /dev/null +++ b/tests/storage/sqlite_dos/migrations/test_all_schema.py @@ -0,0 +1,49 @@ +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. 
# +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +"""Basic tests for all migrations""" + +import pytest +from aiida.storage.sqlite_dos.backend import SqliteDosMigrator + + +@pytest.mark.parametrize('version', list(v for v in SqliteDosMigrator.get_schema_versions() if v.startswith('main'))) +def test_main(version, uninitialised_profile, reflect_schema, data_regression): + """Test that the migrations produce the expected database schema.""" + migrator = SqliteDosMigrator(uninitialised_profile) + migrator.migrate_up(f'main@{version}') + data_regression.check(reflect_schema(uninitialised_profile)) + + +def test_main_initialized(uninitialised_profile): + """Test that ``migrate`` properly stamps the new schema version when updating database with existing schema.""" + migrator = SqliteDosMigrator(uninitialised_profile) + + # Initialize database at first version of main branch + migrator.migrate_up('main@main_0001') + assert migrator.get_schema_version_profile() == 'main_0001' + migrator.close() + + # Reinitialize the migrator to make sure we are fetching actual state of database and not in-memory state and then + # migrate to head schema version. + migrator = SqliteDosMigrator(uninitialised_profile) + migrator.migrate() + migrator.close() + + # Reinitialize the migrator to make sure we are fetching actual state of database and not in-memory state and then + # assert that the database version is properly set to the head schema version + migrator = SqliteDosMigrator(uninitialised_profile) + assert migrator.get_schema_version_profile() == migrator.get_schema_version_head() + + +def test_head_vs_orm(uninitialised_profile, reflect_schema, data_regression): + """Test that the migrations produce the same database schema as the models.""" + migrator = SqliteDosMigrator(uninitialised_profile) + head_version = migrator.get_schema_version_head() + migrator.initialise() + data_regression.check(reflect_schema(uninitialised_profile), basename=f'test_head_vs_orm_{head_version}_') diff --git a/tests/storage/sqlite_dos/migrations/test_all_schema/test_head_vs_orm_main_0002_.yml b/tests/storage/sqlite_dos/migrations/test_all_schema/test_head_vs_orm_main_0002_.yml new file mode 100644 index 0000000000..b70a576550 --- /dev/null +++ b/tests/storage/sqlite_dos/migrations/test_all_schema/test_head_vs_orm_main_0002_.yml @@ -0,0 +1,269 @@ +index: +- - ix_db_dbauthinfo_db_dbauthinfo_aiidauser_id + - db_dbauthinfo + - - CREATE INDEX ix_db_dbauthinfo_db_dbauthinfo_aiidauser_id ON db_dbauthinfo (aiidauser_id) +- - ix_db_dbauthinfo_db_dbauthinfo_dbcomputer_id + - db_dbauthinfo + - - CREATE INDEX ix_db_dbauthinfo_db_dbauthinfo_dbcomputer_id ON db_dbauthinfo (dbcomputer_id) +- - ix_db_dbcomment_db_dbcomment_dbnode_id + - db_dbcomment + - - CREATE INDEX ix_db_dbcomment_db_dbcomment_dbnode_id ON db_dbcomment (dbnode_id) +- - ix_db_dbcomment_db_dbcomment_user_id + - db_dbcomment + - - CREATE INDEX ix_db_dbcomment_db_dbcomment_user_id ON db_dbcomment (user_id) +- - ix_db_dbgroup_db_dbgroup_label + - db_dbgroup + - - CREATE INDEX ix_db_dbgroup_db_dbgroup_label ON db_dbgroup (label) +- - ix_db_dbgroup_db_dbgroup_type_string + - db_dbgroup + - - CREATE INDEX ix_db_dbgroup_db_dbgroup_type_string ON db_dbgroup (type_string) +- - ix_db_dbgroup_db_dbgroup_user_id + - db_dbgroup + - - CREATE INDEX 
ix_db_dbgroup_db_dbgroup_user_id ON db_dbgroup (user_id) +- - ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbgroup_id + - db_dbgroup_dbnodes + - - CREATE INDEX ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbgroup_id ON db_dbgroup_dbnodes + (dbgroup_id) +- - ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbnode_id + - db_dbgroup_dbnodes + - - CREATE INDEX ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbnode_id ON db_dbgroup_dbnodes + (dbnode_id) +- - ix_db_dblink_db_dblink_input_id + - db_dblink + - - CREATE INDEX ix_db_dblink_db_dblink_input_id ON db_dblink (input_id) +- - ix_db_dblink_db_dblink_label + - db_dblink + - - CREATE INDEX ix_db_dblink_db_dblink_label ON db_dblink (label) +- - ix_db_dblink_db_dblink_output_id + - db_dblink + - - CREATE INDEX ix_db_dblink_db_dblink_output_id ON db_dblink (output_id) +- - ix_db_dblink_db_dblink_type + - db_dblink + - - CREATE INDEX ix_db_dblink_db_dblink_type ON db_dblink (type) +- - ix_db_dblog_db_dblog_dbnode_id + - db_dblog + - - CREATE INDEX ix_db_dblog_db_dblog_dbnode_id ON db_dblog (dbnode_id) +- - ix_db_dblog_db_dblog_levelname + - db_dblog + - - CREATE INDEX ix_db_dblog_db_dblog_levelname ON db_dblog (levelname) +- - ix_db_dblog_db_dblog_loggername + - db_dblog + - - CREATE INDEX ix_db_dblog_db_dblog_loggername ON db_dblog (loggername) +- - ix_db_dbnode_db_dbnode_ctime + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_ctime ON db_dbnode (ctime) +- - ix_db_dbnode_db_dbnode_dbcomputer_id + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_dbcomputer_id ON db_dbnode (dbcomputer_id) +- - ix_db_dbnode_db_dbnode_label + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_label ON db_dbnode (label) +- - ix_db_dbnode_db_dbnode_mtime + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_mtime ON db_dbnode (mtime) +- - ix_db_dbnode_db_dbnode_node_type + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_node_type ON db_dbnode (node_type) +- - ix_db_dbnode_db_dbnode_process_type + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_process_type ON db_dbnode (process_type) +- - ix_db_dbnode_db_dbnode_user_id + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_user_id ON db_dbnode (user_id) +- - sqlite_autoindex_alembic_version_1 + - alembic_version + - [] +- - sqlite_autoindex_db_dbauthinfo_1 + - db_dbauthinfo + - [] +- - sqlite_autoindex_db_dbcomment_1 + - db_dbcomment + - [] +- - sqlite_autoindex_db_dbcomputer_1 + - db_dbcomputer + - [] +- - sqlite_autoindex_db_dbcomputer_2 + - db_dbcomputer + - [] +- - sqlite_autoindex_db_dbgroup_1 + - db_dbgroup + - [] +- - sqlite_autoindex_db_dbgroup_2 + - db_dbgroup + - [] +- - sqlite_autoindex_db_dbgroup_dbnodes_1 + - db_dbgroup_dbnodes + - [] +- - sqlite_autoindex_db_dblog_1 + - db_dblog + - [] +- - sqlite_autoindex_db_dbnode_1 + - db_dbnode + - [] +- - sqlite_autoindex_db_dbsetting_1 + - db_dbsetting + - [] +- - sqlite_autoindex_db_dbuser_1 + - db_dbuser + - [] +table: +- - alembic_version + - alembic_version + - - CREATE TABLE alembic_version ( + - version_num VARCHAR(32) NOT NULL + - ) + - CONSTRAINT alembic_version_pkc PRIMARY KEY (version_num) +- - db_dbauthinfo + - db_dbauthinfo + - - CREATE TABLE db_dbauthinfo ( + - id INTEGER NOT NULL + - aiidauser_id INTEGER NOT NULL + - dbcomputer_id INTEGER NOT NULL + - metadata JSON NOT NULL + - auth_params JSON NOT NULL + - enabled BOOLEAN NOT NULL + - ) + - CONSTRAINT db_dbauthinfo_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbauthinfo_aiidauser_id_db_dbuser FOREIGN KEY(aiidauser_id) + REFERENCES db_dbuser (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + 
- CONSTRAINT fk_db_dbauthinfo_dbcomputer_id_db_dbcomputer FOREIGN KEY(dbcomputer_id) + REFERENCES db_dbcomputer (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbauthinfo_aiidauser_id_dbcomputer_id UNIQUE (aiidauser_id, + dbcomputer_id) +- - db_dbcomment + - db_dbcomment + - - CREATE TABLE db_dbcomment ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - dbnode_id INTEGER NOT NULL + - ctime DATETIME NOT NULL + - mtime DATETIME NOT NULL + - user_id INTEGER NOT NULL + - content TEXT NOT NULL + - ) + - CONSTRAINT db_dbcomment_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbcomment_dbnode_id_db_dbnode FOREIGN KEY(dbnode_id) REFERENCES + db_dbnode (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dbcomment_user_id_db_dbuser FOREIGN KEY(user_id) REFERENCES + db_dbuser (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbcomment_uuid UNIQUE (uuid) +- - db_dbcomputer + - db_dbcomputer + - - CREATE TABLE db_dbcomputer ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - label VARCHAR(255) NOT NULL + - hostname VARCHAR(255) NOT NULL + - description TEXT NOT NULL + - scheduler_type VARCHAR(255) NOT NULL + - transport_type VARCHAR(255) NOT NULL + - metadata JSON NOT NULL + - ) + - CONSTRAINT db_dbcomputer_pkey PRIMARY KEY (id) + - CONSTRAINT uq_db_dbcomputer_label UNIQUE (label) + - CONSTRAINT uq_db_dbcomputer_uuid UNIQUE (uuid) +- - db_dbgroup + - db_dbgroup + - - CREATE TABLE db_dbgroup ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - label VARCHAR(255) NOT NULL + - type_string VARCHAR(255) NOT NULL + - time DATETIME NOT NULL + - description TEXT NOT NULL + - extras JSON NOT NULL + - user_id INTEGER NOT NULL + - ) + - CONSTRAINT db_dbgroup_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbgroup_user_id_db_dbuser FOREIGN KEY(user_id) REFERENCES db_dbuser + (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbgroup_label_type_string UNIQUE (label, type_string) + - CONSTRAINT uq_db_dbgroup_uuid UNIQUE (uuid) +- - db_dbgroup_dbnodes + - db_dbgroup_dbnodes + - - CREATE TABLE db_dbgroup_dbnodes ( + - id INTEGER NOT NULL + - dbnode_id INTEGER NOT NULL + - dbgroup_id INTEGER NOT NULL + - ) + - CONSTRAINT db_dbgroup_dbnodes_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbgroup_dbnodes_dbgroup_id_db_dbgroup FOREIGN KEY(dbgroup_id) + REFERENCES db_dbgroup (id) DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dbgroup_dbnodes_dbnode_id_db_dbnode FOREIGN KEY(dbnode_id) + REFERENCES db_dbnode (id) DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbgroup_dbnodes_dbgroup_id_dbnode_id UNIQUE (dbgroup_id, dbnode_id) +- - db_dblink + - db_dblink + - - CREATE TABLE db_dblink ( + - id INTEGER NOT NULL + - input_id INTEGER NOT NULL + - output_id INTEGER NOT NULL + - label VARCHAR(255) NOT NULL + - type VARCHAR(255) NOT NULL + - ) + - CONSTRAINT db_dblink_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dblink_input_id_db_dbnode FOREIGN KEY(input_id) REFERENCES + db_dbnode (id) DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dblink_output_id_db_dbnode FOREIGN KEY(output_id) REFERENCES + db_dbnode (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED +- - db_dblog + - db_dblog + - - CREATE TABLE db_dblog ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - time DATETIME NOT NULL + - loggername VARCHAR(255) NOT NULL + - levelname VARCHAR(50) NOT NULL + - dbnode_id INTEGER NOT NULL + - message TEXT NOT NULL + - metadata JSON NOT NULL + - ) + - CONSTRAINT db_dblog_pkey PRIMARY KEY (id) + - CONSTRAINT 
fk_db_dblog_dbnode_id_db_dbnode FOREIGN KEY(dbnode_id) REFERENCES + db_dbnode (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dblog_uuid UNIQUE (uuid) +- - db_dbnode + - db_dbnode + - - CREATE TABLE db_dbnode ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - node_type VARCHAR(255) NOT NULL + - process_type VARCHAR(255) + - label VARCHAR(255) NOT NULL + - description TEXT NOT NULL + - ctime DATETIME NOT NULL + - mtime DATETIME NOT NULL + - attributes JSON + - extras JSON + - repository_metadata JSON NOT NULL + - dbcomputer_id INTEGER + - user_id INTEGER NOT NULL + - ) + - CONSTRAINT db_dbnode_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbnode_dbcomputer_id_db_dbcomputer FOREIGN KEY(dbcomputer_id) + REFERENCES db_dbcomputer (id) ON DELETE RESTRICT DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dbnode_user_id_db_dbuser FOREIGN KEY(user_id) REFERENCES db_dbuser + (id) ON DELETE RESTRICT DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbnode_uuid UNIQUE (uuid) +- - db_dbsetting + - db_dbsetting + - - CREATE TABLE db_dbsetting ( + - id INTEGER NOT NULL + - '"key" VARCHAR(1024) NOT NULL' + - val JSON + - description TEXT NOT NULL + - time DATETIME NOT NULL + - ) + - CONSTRAINT db_dbsetting_pkey PRIMARY KEY (id) + - CONSTRAINT uq_db_dbsetting_key UNIQUE ("key") +- - db_dbuser + - db_dbuser + - - CREATE TABLE db_dbuser ( + - id INTEGER NOT NULL + - email VARCHAR(254) NOT NULL + - first_name VARCHAR(254) NOT NULL + - last_name VARCHAR(254) NOT NULL + - institution VARCHAR(254) NOT NULL + - ) + - CONSTRAINT db_dbuser_pkey PRIMARY KEY (id) + - CONSTRAINT uq_db_dbuser_email UNIQUE (email) diff --git a/tests/storage/sqlite_dos/migrations/test_all_schema/test_main_main_0001_.yml b/tests/storage/sqlite_dos/migrations/test_all_schema/test_main_main_0001_.yml new file mode 100644 index 0000000000..3b49696512 --- /dev/null +++ b/tests/storage/sqlite_dos/migrations/test_all_schema/test_main_main_0001_.yml @@ -0,0 +1,255 @@ +index: +- - ix_db_dbauthinfo_db_dbauthinfo_aiidauser_id + - db_dbauthinfo + - - CREATE INDEX ix_db_dbauthinfo_db_dbauthinfo_aiidauser_id ON db_dbauthinfo (aiidauser_id) +- - ix_db_dbauthinfo_db_dbauthinfo_dbcomputer_id + - db_dbauthinfo + - - CREATE INDEX ix_db_dbauthinfo_db_dbauthinfo_dbcomputer_id ON db_dbauthinfo (dbcomputer_id) +- - ix_db_dbcomment_db_dbcomment_dbnode_id + - db_dbcomment + - - CREATE INDEX ix_db_dbcomment_db_dbcomment_dbnode_id ON db_dbcomment (dbnode_id) +- - ix_db_dbcomment_db_dbcomment_user_id + - db_dbcomment + - - CREATE INDEX ix_db_dbcomment_db_dbcomment_user_id ON db_dbcomment (user_id) +- - ix_db_dbgroup_db_dbgroup_label + - db_dbgroup + - - CREATE INDEX ix_db_dbgroup_db_dbgroup_label ON db_dbgroup (label) +- - ix_db_dbgroup_db_dbgroup_type_string + - db_dbgroup + - - CREATE INDEX ix_db_dbgroup_db_dbgroup_type_string ON db_dbgroup (type_string) +- - ix_db_dbgroup_db_dbgroup_user_id + - db_dbgroup + - - CREATE INDEX ix_db_dbgroup_db_dbgroup_user_id ON db_dbgroup (user_id) +- - ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbgroup_id + - db_dbgroup_dbnodes + - - CREATE INDEX ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbgroup_id ON db_dbgroup_dbnodes + (dbgroup_id) +- - ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbnode_id + - db_dbgroup_dbnodes + - - CREATE INDEX ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbnode_id ON db_dbgroup_dbnodes + (dbnode_id) +- - ix_db_dblink_db_dblink_input_id + - db_dblink + - - CREATE INDEX ix_db_dblink_db_dblink_input_id ON db_dblink (input_id) +- - ix_db_dblink_db_dblink_label + - db_dblink + - - CREATE 
INDEX ix_db_dblink_db_dblink_label ON db_dblink (label) +- - ix_db_dblink_db_dblink_output_id + - db_dblink + - - CREATE INDEX ix_db_dblink_db_dblink_output_id ON db_dblink (output_id) +- - ix_db_dblink_db_dblink_type + - db_dblink + - - CREATE INDEX ix_db_dblink_db_dblink_type ON db_dblink (type) +- - ix_db_dblog_db_dblog_dbnode_id + - db_dblog + - - CREATE INDEX ix_db_dblog_db_dblog_dbnode_id ON db_dblog (dbnode_id) +- - ix_db_dblog_db_dblog_levelname + - db_dblog + - - CREATE INDEX ix_db_dblog_db_dblog_levelname ON db_dblog (levelname) +- - ix_db_dblog_db_dblog_loggername + - db_dblog + - - CREATE INDEX ix_db_dblog_db_dblog_loggername ON db_dblog (loggername) +- - ix_db_dbnode_db_dbnode_ctime + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_ctime ON db_dbnode (ctime) +- - ix_db_dbnode_db_dbnode_dbcomputer_id + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_dbcomputer_id ON db_dbnode (dbcomputer_id) +- - ix_db_dbnode_db_dbnode_label + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_label ON db_dbnode (label) +- - ix_db_dbnode_db_dbnode_mtime + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_mtime ON db_dbnode (mtime) +- - ix_db_dbnode_db_dbnode_node_type + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_node_type ON db_dbnode (node_type) +- - ix_db_dbnode_db_dbnode_process_type + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_process_type ON db_dbnode (process_type) +- - ix_db_dbnode_db_dbnode_user_id + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_user_id ON db_dbnode (user_id) +- - sqlite_autoindex_alembic_version_1 + - alembic_version + - [] +- - sqlite_autoindex_db_dbauthinfo_1 + - db_dbauthinfo + - [] +- - sqlite_autoindex_db_dbcomment_1 + - db_dbcomment + - [] +- - sqlite_autoindex_db_dbcomputer_1 + - db_dbcomputer + - [] +- - sqlite_autoindex_db_dbcomputer_2 + - db_dbcomputer + - [] +- - sqlite_autoindex_db_dbgroup_1 + - db_dbgroup + - [] +- - sqlite_autoindex_db_dbgroup_2 + - db_dbgroup + - [] +- - sqlite_autoindex_db_dbgroup_dbnodes_1 + - db_dbgroup_dbnodes + - [] +- - sqlite_autoindex_db_dblog_1 + - db_dblog + - [] +- - sqlite_autoindex_db_dbnode_1 + - db_dbnode + - [] +- - sqlite_autoindex_db_dbuser_1 + - db_dbuser + - [] +table: +- - alembic_version + - alembic_version + - - CREATE TABLE alembic_version ( + - version_num VARCHAR(32) NOT NULL + - ) + - CONSTRAINT alembic_version_pkc PRIMARY KEY (version_num) +- - db_dbauthinfo + - db_dbauthinfo + - - CREATE TABLE db_dbauthinfo ( + - id INTEGER NOT NULL + - aiidauser_id INTEGER NOT NULL + - dbcomputer_id INTEGER NOT NULL + - metadata JSON NOT NULL + - auth_params JSON NOT NULL + - enabled BOOLEAN NOT NULL + - ) + - CONSTRAINT db_dbauthinfo_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbauthinfo_aiidauser_id_db_dbuser FOREIGN KEY(aiidauser_id) + REFERENCES db_dbuser (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dbauthinfo_dbcomputer_id_db_dbcomputer FOREIGN KEY(dbcomputer_id) + REFERENCES db_dbcomputer (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbauthinfo_aiidauser_id_dbcomputer_id UNIQUE (aiidauser_id, + dbcomputer_id) +- - db_dbcomment + - db_dbcomment + - - CREATE TABLE db_dbcomment ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - dbnode_id INTEGER NOT NULL + - ctime DATETIME NOT NULL + - mtime DATETIME NOT NULL + - user_id INTEGER NOT NULL + - content TEXT NOT NULL + - ) + - CONSTRAINT db_dbcomment_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbcomment_dbnode_id_db_dbnode FOREIGN KEY(dbnode_id) REFERENCES + 
db_dbnode (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dbcomment_user_id_db_dbuser FOREIGN KEY(user_id) REFERENCES + db_dbuser (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbcomment_uuid UNIQUE (uuid) +- - db_dbcomputer + - db_dbcomputer + - - CREATE TABLE db_dbcomputer ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - label VARCHAR(255) NOT NULL + - hostname VARCHAR(255) NOT NULL + - description TEXT NOT NULL + - scheduler_type VARCHAR(255) NOT NULL + - transport_type VARCHAR(255) NOT NULL + - metadata JSON NOT NULL + - ) + - CONSTRAINT db_dbcomputer_pkey PRIMARY KEY (id) + - CONSTRAINT uq_db_dbcomputer_label UNIQUE (label) + - CONSTRAINT uq_db_dbcomputer_uuid UNIQUE (uuid) +- - db_dbgroup + - db_dbgroup + - - CREATE TABLE db_dbgroup ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - label VARCHAR(255) NOT NULL + - type_string VARCHAR(255) NOT NULL + - time DATETIME NOT NULL + - description TEXT NOT NULL + - extras JSON NOT NULL + - user_id INTEGER NOT NULL + - ) + - CONSTRAINT db_dbgroup_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbgroup_user_id_db_dbuser FOREIGN KEY(user_id) REFERENCES db_dbuser + (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbgroup_label_type_string UNIQUE (label, type_string) + - CONSTRAINT uq_db_dbgroup_uuid UNIQUE (uuid) +- - db_dbgroup_dbnodes + - db_dbgroup_dbnodes + - - CREATE TABLE db_dbgroup_dbnodes ( + - id INTEGER NOT NULL + - dbnode_id INTEGER NOT NULL + - dbgroup_id INTEGER NOT NULL + - ) + - CONSTRAINT db_dbgroup_dbnodes_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbgroup_dbnodes_dbgroup_id_db_dbgroup FOREIGN KEY(dbgroup_id) + REFERENCES db_dbgroup (id) DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dbgroup_dbnodes_dbnode_id_db_dbnode FOREIGN KEY(dbnode_id) + REFERENCES db_dbnode (id) DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbgroup_dbnodes_dbgroup_id_dbnode_id UNIQUE (dbgroup_id, dbnode_id) +- - db_dblink + - db_dblink + - - CREATE TABLE db_dblink ( + - id INTEGER NOT NULL + - input_id INTEGER NOT NULL + - output_id INTEGER NOT NULL + - label VARCHAR(255) NOT NULL + - type VARCHAR(255) NOT NULL + - ) + - CONSTRAINT db_dblink_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dblink_input_id_db_dbnode FOREIGN KEY(input_id) REFERENCES + db_dbnode (id) DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dblink_output_id_db_dbnode FOREIGN KEY(output_id) REFERENCES + db_dbnode (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED +- - db_dblog + - db_dblog + - - CREATE TABLE db_dblog ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - time DATETIME NOT NULL + - loggername VARCHAR(255) NOT NULL + - levelname VARCHAR(50) NOT NULL + - dbnode_id INTEGER NOT NULL + - message TEXT NOT NULL + - metadata JSON NOT NULL + - ) + - CONSTRAINT db_dblog_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dblog_dbnode_id_db_dbnode FOREIGN KEY(dbnode_id) REFERENCES + db_dbnode (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dblog_uuid UNIQUE (uuid) +- - db_dbnode + - db_dbnode + - - CREATE TABLE db_dbnode ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - node_type VARCHAR(255) NOT NULL + - process_type VARCHAR(255) + - label VARCHAR(255) NOT NULL + - description TEXT NOT NULL + - ctime DATETIME NOT NULL + - mtime DATETIME NOT NULL + - attributes JSON + - extras JSON + - repository_metadata JSON NOT NULL + - dbcomputer_id INTEGER + - user_id INTEGER NOT NULL + - ) + - CONSTRAINT db_dbnode_pkey PRIMARY KEY (id) + - CONSTRAINT 
fk_db_dbnode_dbcomputer_id_db_dbcomputer FOREIGN KEY(dbcomputer_id) + REFERENCES db_dbcomputer (id) ON DELETE RESTRICT DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dbnode_user_id_db_dbuser FOREIGN KEY(user_id) REFERENCES db_dbuser + (id) ON DELETE restrict DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbnode_uuid UNIQUE (uuid) +- - db_dbuser + - db_dbuser + - - CREATE TABLE db_dbuser ( + - id INTEGER NOT NULL + - email VARCHAR(254) NOT NULL + - first_name VARCHAR(254) NOT NULL + - last_name VARCHAR(254) NOT NULL + - institution VARCHAR(254) NOT NULL + - ) + - CONSTRAINT db_dbuser_pkey PRIMARY KEY (id) + - CONSTRAINT uq_db_dbuser_email UNIQUE (email) diff --git a/tests/storage/sqlite_dos/migrations/test_all_schema/test_main_main_0002_.yml b/tests/storage/sqlite_dos/migrations/test_all_schema/test_main_main_0002_.yml new file mode 100644 index 0000000000..3b49696512 --- /dev/null +++ b/tests/storage/sqlite_dos/migrations/test_all_schema/test_main_main_0002_.yml @@ -0,0 +1,255 @@ +index: +- - ix_db_dbauthinfo_db_dbauthinfo_aiidauser_id + - db_dbauthinfo + - - CREATE INDEX ix_db_dbauthinfo_db_dbauthinfo_aiidauser_id ON db_dbauthinfo (aiidauser_id) +- - ix_db_dbauthinfo_db_dbauthinfo_dbcomputer_id + - db_dbauthinfo + - - CREATE INDEX ix_db_dbauthinfo_db_dbauthinfo_dbcomputer_id ON db_dbauthinfo (dbcomputer_id) +- - ix_db_dbcomment_db_dbcomment_dbnode_id + - db_dbcomment + - - CREATE INDEX ix_db_dbcomment_db_dbcomment_dbnode_id ON db_dbcomment (dbnode_id) +- - ix_db_dbcomment_db_dbcomment_user_id + - db_dbcomment + - - CREATE INDEX ix_db_dbcomment_db_dbcomment_user_id ON db_dbcomment (user_id) +- - ix_db_dbgroup_db_dbgroup_label + - db_dbgroup + - - CREATE INDEX ix_db_dbgroup_db_dbgroup_label ON db_dbgroup (label) +- - ix_db_dbgroup_db_dbgroup_type_string + - db_dbgroup + - - CREATE INDEX ix_db_dbgroup_db_dbgroup_type_string ON db_dbgroup (type_string) +- - ix_db_dbgroup_db_dbgroup_user_id + - db_dbgroup + - - CREATE INDEX ix_db_dbgroup_db_dbgroup_user_id ON db_dbgroup (user_id) +- - ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbgroup_id + - db_dbgroup_dbnodes + - - CREATE INDEX ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbgroup_id ON db_dbgroup_dbnodes + (dbgroup_id) +- - ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbnode_id + - db_dbgroup_dbnodes + - - CREATE INDEX ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbnode_id ON db_dbgroup_dbnodes + (dbnode_id) +- - ix_db_dblink_db_dblink_input_id + - db_dblink + - - CREATE INDEX ix_db_dblink_db_dblink_input_id ON db_dblink (input_id) +- - ix_db_dblink_db_dblink_label + - db_dblink + - - CREATE INDEX ix_db_dblink_db_dblink_label ON db_dblink (label) +- - ix_db_dblink_db_dblink_output_id + - db_dblink + - - CREATE INDEX ix_db_dblink_db_dblink_output_id ON db_dblink (output_id) +- - ix_db_dblink_db_dblink_type + - db_dblink + - - CREATE INDEX ix_db_dblink_db_dblink_type ON db_dblink (type) +- - ix_db_dblog_db_dblog_dbnode_id + - db_dblog + - - CREATE INDEX ix_db_dblog_db_dblog_dbnode_id ON db_dblog (dbnode_id) +- - ix_db_dblog_db_dblog_levelname + - db_dblog + - - CREATE INDEX ix_db_dblog_db_dblog_levelname ON db_dblog (levelname) +- - ix_db_dblog_db_dblog_loggername + - db_dblog + - - CREATE INDEX ix_db_dblog_db_dblog_loggername ON db_dblog (loggername) +- - ix_db_dbnode_db_dbnode_ctime + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_ctime ON db_dbnode (ctime) +- - ix_db_dbnode_db_dbnode_dbcomputer_id + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_dbcomputer_id ON db_dbnode (dbcomputer_id) +- - ix_db_dbnode_db_dbnode_label + - 
db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_label ON db_dbnode (label) +- - ix_db_dbnode_db_dbnode_mtime + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_mtime ON db_dbnode (mtime) +- - ix_db_dbnode_db_dbnode_node_type + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_node_type ON db_dbnode (node_type) +- - ix_db_dbnode_db_dbnode_process_type + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_process_type ON db_dbnode (process_type) +- - ix_db_dbnode_db_dbnode_user_id + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_user_id ON db_dbnode (user_id) +- - sqlite_autoindex_alembic_version_1 + - alembic_version + - [] +- - sqlite_autoindex_db_dbauthinfo_1 + - db_dbauthinfo + - [] +- - sqlite_autoindex_db_dbcomment_1 + - db_dbcomment + - [] +- - sqlite_autoindex_db_dbcomputer_1 + - db_dbcomputer + - [] +- - sqlite_autoindex_db_dbcomputer_2 + - db_dbcomputer + - [] +- - sqlite_autoindex_db_dbgroup_1 + - db_dbgroup + - [] +- - sqlite_autoindex_db_dbgroup_2 + - db_dbgroup + - [] +- - sqlite_autoindex_db_dbgroup_dbnodes_1 + - db_dbgroup_dbnodes + - [] +- - sqlite_autoindex_db_dblog_1 + - db_dblog + - [] +- - sqlite_autoindex_db_dbnode_1 + - db_dbnode + - [] +- - sqlite_autoindex_db_dbuser_1 + - db_dbuser + - [] +table: +- - alembic_version + - alembic_version + - - CREATE TABLE alembic_version ( + - version_num VARCHAR(32) NOT NULL + - ) + - CONSTRAINT alembic_version_pkc PRIMARY KEY (version_num) +- - db_dbauthinfo + - db_dbauthinfo + - - CREATE TABLE db_dbauthinfo ( + - id INTEGER NOT NULL + - aiidauser_id INTEGER NOT NULL + - dbcomputer_id INTEGER NOT NULL + - metadata JSON NOT NULL + - auth_params JSON NOT NULL + - enabled BOOLEAN NOT NULL + - ) + - CONSTRAINT db_dbauthinfo_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbauthinfo_aiidauser_id_db_dbuser FOREIGN KEY(aiidauser_id) + REFERENCES db_dbuser (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dbauthinfo_dbcomputer_id_db_dbcomputer FOREIGN KEY(dbcomputer_id) + REFERENCES db_dbcomputer (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbauthinfo_aiidauser_id_dbcomputer_id UNIQUE (aiidauser_id, + dbcomputer_id) +- - db_dbcomment + - db_dbcomment + - - CREATE TABLE db_dbcomment ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - dbnode_id INTEGER NOT NULL + - ctime DATETIME NOT NULL + - mtime DATETIME NOT NULL + - user_id INTEGER NOT NULL + - content TEXT NOT NULL + - ) + - CONSTRAINT db_dbcomment_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbcomment_dbnode_id_db_dbnode FOREIGN KEY(dbnode_id) REFERENCES + db_dbnode (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dbcomment_user_id_db_dbuser FOREIGN KEY(user_id) REFERENCES + db_dbuser (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbcomment_uuid UNIQUE (uuid) +- - db_dbcomputer + - db_dbcomputer + - - CREATE TABLE db_dbcomputer ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - label VARCHAR(255) NOT NULL + - hostname VARCHAR(255) NOT NULL + - description TEXT NOT NULL + - scheduler_type VARCHAR(255) NOT NULL + - transport_type VARCHAR(255) NOT NULL + - metadata JSON NOT NULL + - ) + - CONSTRAINT db_dbcomputer_pkey PRIMARY KEY (id) + - CONSTRAINT uq_db_dbcomputer_label UNIQUE (label) + - CONSTRAINT uq_db_dbcomputer_uuid UNIQUE (uuid) +- - db_dbgroup + - db_dbgroup + - - CREATE TABLE db_dbgroup ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - label VARCHAR(255) NOT NULL + - type_string VARCHAR(255) NOT NULL + - time DATETIME NOT NULL + - 
description TEXT NOT NULL + - extras JSON NOT NULL + - user_id INTEGER NOT NULL + - ) + - CONSTRAINT db_dbgroup_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbgroup_user_id_db_dbuser FOREIGN KEY(user_id) REFERENCES db_dbuser + (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbgroup_label_type_string UNIQUE (label, type_string) + - CONSTRAINT uq_db_dbgroup_uuid UNIQUE (uuid) +- - db_dbgroup_dbnodes + - db_dbgroup_dbnodes + - - CREATE TABLE db_dbgroup_dbnodes ( + - id INTEGER NOT NULL + - dbnode_id INTEGER NOT NULL + - dbgroup_id INTEGER NOT NULL + - ) + - CONSTRAINT db_dbgroup_dbnodes_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbgroup_dbnodes_dbgroup_id_db_dbgroup FOREIGN KEY(dbgroup_id) + REFERENCES db_dbgroup (id) DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dbgroup_dbnodes_dbnode_id_db_dbnode FOREIGN KEY(dbnode_id) + REFERENCES db_dbnode (id) DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbgroup_dbnodes_dbgroup_id_dbnode_id UNIQUE (dbgroup_id, dbnode_id) +- - db_dblink + - db_dblink + - - CREATE TABLE db_dblink ( + - id INTEGER NOT NULL + - input_id INTEGER NOT NULL + - output_id INTEGER NOT NULL + - label VARCHAR(255) NOT NULL + - type VARCHAR(255) NOT NULL + - ) + - CONSTRAINT db_dblink_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dblink_input_id_db_dbnode FOREIGN KEY(input_id) REFERENCES + db_dbnode (id) DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dblink_output_id_db_dbnode FOREIGN KEY(output_id) REFERENCES + db_dbnode (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED +- - db_dblog + - db_dblog + - - CREATE TABLE db_dblog ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - time DATETIME NOT NULL + - loggername VARCHAR(255) NOT NULL + - levelname VARCHAR(50) NOT NULL + - dbnode_id INTEGER NOT NULL + - message TEXT NOT NULL + - metadata JSON NOT NULL + - ) + - CONSTRAINT db_dblog_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dblog_dbnode_id_db_dbnode FOREIGN KEY(dbnode_id) REFERENCES + db_dbnode (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dblog_uuid UNIQUE (uuid) +- - db_dbnode + - db_dbnode + - - CREATE TABLE db_dbnode ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - node_type VARCHAR(255) NOT NULL + - process_type VARCHAR(255) + - label VARCHAR(255) NOT NULL + - description TEXT NOT NULL + - ctime DATETIME NOT NULL + - mtime DATETIME NOT NULL + - attributes JSON + - extras JSON + - repository_metadata JSON NOT NULL + - dbcomputer_id INTEGER + - user_id INTEGER NOT NULL + - ) + - CONSTRAINT db_dbnode_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbnode_dbcomputer_id_db_dbcomputer FOREIGN KEY(dbcomputer_id) + REFERENCES db_dbcomputer (id) ON DELETE RESTRICT DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dbnode_user_id_db_dbuser FOREIGN KEY(user_id) REFERENCES db_dbuser + (id) ON DELETE restrict DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbnode_uuid UNIQUE (uuid) +- - db_dbuser + - db_dbuser + - - CREATE TABLE db_dbuser ( + - id INTEGER NOT NULL + - email VARCHAR(254) NOT NULL + - first_name VARCHAR(254) NOT NULL + - last_name VARCHAR(254) NOT NULL + - institution VARCHAR(254) NOT NULL + - ) + - CONSTRAINT db_dbuser_pkey PRIMARY KEY (id) + - CONSTRAINT uq_db_dbuser_email UNIQUE (email) From 5c1f5d6fcb3dcbe51fe7c8eda88daf123f191635 Mon Sep 17 00:00:00 2001 From: Ali Khosravi Date: Fri, 5 Jul 2024 10:15:23 +0200 Subject: [PATCH 37/42] Docs: Add `robots.txt` to only allow indexing of `latest` and `stable` (#6517) Currently, all versions of the documentation are indexed with the result that 
Google searches come up with very outdated versions and the latest version is
almost impossible to find. The `robots.txt` now disallows any path from being
indexed except for the `latest` and `stable` versions of the documentation.
---
 docs/source/conf.py    | 3 +++
 docs/source/robots.txt | 4 ++++
 2 files changed, 7 insertions(+)
 create mode 100644 docs/source/robots.txt

diff --git a/docs/source/conf.py b/docs/source/conf.py
index d017051cb3..03922a0efa 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -171,6 +171,9 @@
 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
 html_show_copyright = False

+# Tell search engines to index only the stable and latest versions of the documentation
+html_extra_path = ['robots.txt']
+
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']

diff --git a/docs/source/robots.txt b/docs/source/robots.txt
new file mode 100644
index 0000000000..f94eda030d
--- /dev/null
+++ b/docs/source/robots.txt
@@ -0,0 +1,4 @@
+User-agent: *
+Allow: /projects/aiida-core/en/latest/
+Allow: /projects/aiida-core/en/stable/
+Disallow: /

From a3f734d8a89715ea27f273df64e130cac89957a6 Mon Sep 17 00:00:00 2001
From: Julian Geiger
Date: Fri, 5 Jul 2024 13:14:35 +0200
Subject: [PATCH 38/42] Docs: Update `redirects.txt` for installation pages
 (#6509)

---
 docs/source/redirects.txt | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/docs/source/redirects.txt b/docs/source/redirects.txt
index 50f8ed2029..594ba48d66 100644
--- a/docs/source/redirects.txt
+++ b/docs/source/redirects.txt
@@ -13,6 +13,13 @@ install/installation.rst installation/index.rst
 install/configuration.rst howto/installation.rst
 install/updating_installation.rst howto/installation.rst
 install/troubleshooting.rst installation/troubleshooting.rst
+intro/get_started.rst installation/index.rst
+intro/install_system.rst installation/index.rst
+intro/install_conda.rst installation/index.rst
+intro/installation.rst installation/index.rst
+intro/run_docker.rst installation/docker.rst
+intro/tutorial.md tutorials/index.rst
+intro/about.rst intro/index.rst
 restapi/index.rst reference/rest_api.rst
 verdi/verdi_user_guide.rst topics/cli.rst
 working_with_aiida/index.rst howto/index.rst

From c740b99f2bfe366a733f140164a21048cd51198e Mon Sep 17 00:00:00 2001
From: Daniel Hollas
Date: Fri, 5 Jul 2024 15:53:57 +0100
Subject: [PATCH 39/42] Docker: Fix release tag in publish workflow (#6520)

The `AIIDA_VERSION` variable was no longer present in the env. It is now
retrieved separately from the tag version.
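
For clarity: `${tag#v}` in the new workflow step below is POSIX shell parameter
expansion that strips a single leading `v` from the tag name, so the tag
`v2.6.1` yields the version `2.6.1`. A minimal sketch of the same logic in
Python, shown for illustration only (this helper is not part of the workflow):

    def version_from_tag(tag: str) -> str:
        """Strip one leading 'v' from a release tag, e.g. 'v2.6.1' -> '2.6.1'."""
        return tag[1:] if tag.startswith('v') else tag

    assert version_from_tag('v2.6.1') == '2.6.1'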
--- .github/workflows/docker-publish.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml index cd290a5a7c..53969ae2d8 100644 --- a/.github/workflows/docker-publish.yml +++ b/.github/workflows/docker-publish.yml @@ -59,6 +59,12 @@ jobs: vars=$(cat build.json | jq -c '[.variable | to_entries[] | {"key": .key, "value": .value.default}] | from_entries') echo "vars=$vars" | tee -a "${GITHUB_OUTPUT}" + - id: get-version + if: ${{ github.ref_type == 'tag' && startsWith(github.ref_name, 'v') }} + run: | + tag="${{ github.ref_name }}" + echo "AIIDA_VERSION=${tag#v}" >> $GITHUB_OUTPUT + - name: Docker meta id: meta uses: docker/metadata-action@v5 @@ -69,7 +75,7 @@ jobs: type=ref,event=pr type=ref,event=branch,enable=${{ github.ref_name != 'main' }} type=edge,enable={{is_default_branch}} - type=raw,value=aiida-${{ env.AIIDA_VERSION }},enable=${{ github.ref_type == 'tag' && startsWith(github.ref_name, 'v') }} + type=raw,value=aiida-${{ steps.get-version.outputs.AIIDA_VERSION }},enable=${{ github.ref_type == 'tag' && startsWith(github.ref_name, 'v') }} type=raw,value=python-${{ env.PYTHON_VERSION }},enable=${{ github.ref_type == 'tag' && startsWith(github.ref_name, 'v') }} type=raw,value=postgresql-${{ env.PGSQL_VERSION }},enable=${{ github.ref_type == 'tag' && startsWith(github.ref_name, 'v') }} type=match,pattern=v(\d{4}\.\d{4}(-.+)?),group=1 From a5da4eda131f844c3639bdb01a256b9e9a7873a2 Mon Sep 17 00:00:00 2001 From: Daniel Hollas Date: Wed, 10 Jul 2024 11:25:36 +0200 Subject: [PATCH 40/42] Devops: Mark `test_leak_ssh_calcjob` as nightly (#6521) It is a very slow test and is unlikely to be affected by typical changes to the codebase. --- tests/engine/test_memory_leaks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/engine/test_memory_leaks.py b/tests/engine/test_memory_leaks.py index 9cfbe7653a..4eece939f3 100644 --- a/tests/engine/test_memory_leaks.py +++ b/tests/engine/test_memory_leaks.py @@ -65,6 +65,7 @@ def test_leak_local_calcjob(aiida_code_installed): @pytest.mark.skipif(sys.version_info >= (3, 12), reason='Garbage collecting hangs on Python 3.12') +@pytest.mark.nightly @pytest.mark.usefixtures('aiida_profile', 'check_memory_leaks') def test_leak_ssh_calcjob(aiida_computer_ssh): """Test whether running a CalcJob over SSH leaks memory. 
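
For context, the `nightly` marker above only takes effect if regular test runs
deselect it. One common way to wire this up (a sketch assuming a standard
pytest setup; the actual aiida-core configuration is not part of this patch)
is to skip nightly-marked tests unless an opt-in flag is passed:

    # conftest.py (illustrative sketch; see the assumptions stated above)
    import pytest

    def pytest_addoption(parser):
        parser.addoption('--nightly', action='store_true', help='also run tests marked as nightly')

    def pytest_collection_modifyitems(config, items):
        """Skip tests marked as nightly unless --nightly was passed on the command line."""
        if config.getoption('--nightly'):
            return
        skip_nightly = pytest.mark.skip(reason='nightly test; pass --nightly to run it')
        for item in items:
            if 'nightly' in item.keywords:
                item.add_marker(skip_nightly)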
From 9355a9878134b7c8e3e75bb029c251f0bf0a7357 Mon Sep 17 00:00:00 2001 From: Julian Geiger Date: Wed, 10 Jul 2024 13:53:47 +0200 Subject: [PATCH 41/42] CLI: Add default for `output_file` in computer and code export commands (#6486) --- src/aiida/cmdline/commands/cmd_code.py | 33 ++-- src/aiida/cmdline/commands/cmd_computer.py | 52 ++++--- src/aiida/cmdline/params/options/__init__.py | 1 + src/aiida/cmdline/params/options/main.py | 10 ++ src/aiida/cmdline/utils/common.py | 22 +++ tests/cmdline/commands/test_code.py | 74 +++++++-- .../test_code/test_code_export_True_.yml | 8 - ...e_.yml => test_code_export___no_sort_.yml} | 0 ...xport.yml => test_code_export___sort_.yml} | 0 tests/cmdline/commands/test_computer.py | 147 +++++++++++++++--- .../test_computer_export_setup___no_sort_.yml | 13 ++ .../test_computer_export_setup___sort_.yml | 13 ++ tests/cmdline/utils/test_common.py | 39 +++++ tests/tools/dumping/test_processes.py | 5 +- 14 files changed, 336 insertions(+), 81 deletions(-) delete mode 100644 tests/cmdline/commands/test_code/test_code_export_True_.yml rename tests/cmdline/commands/test_code/{test_code_export_False_.yml => test_code_export___no_sort_.yml} (100%) rename tests/cmdline/commands/test_code/{test_code_export.yml => test_code_export___sort_.yml} (100%) create mode 100644 tests/cmdline/commands/test_computer/test_computer_export_setup___no_sort_.yml create mode 100644 tests/cmdline/commands/test_computer/test_computer_export_setup___sort_.yml diff --git a/src/aiida/cmdline/commands/cmd_code.py b/src/aiida/cmdline/commands/cmd_code.py index 477d2f61ab..9740ed8e02 100644 --- a/src/aiida/cmdline/commands/cmd_code.py +++ b/src/aiida/cmdline/commands/cmd_code.py @@ -8,6 +8,7 @@ ########################################################################### """`verdi code` command.""" +import pathlib from collections import defaultdict from functools import partial @@ -18,6 +19,7 @@ from aiida.cmdline.params import arguments, options, types from aiida.cmdline.params.options.commands import code as options_code from aiida.cmdline.utils import echo, echo_tabulate +from aiida.cmdline.utils.common import generate_validate_output_file from aiida.cmdline.utils.decorators import with_dbenv from aiida.common import exceptions @@ -234,34 +236,35 @@ def show(code): @verdi_code.command() @arguments.CODE() -@arguments.OUTPUT_FILE(type=click.Path(exists=False)) -@click.option( - '--sort/--no-sort', - is_flag=True, - default=True, - help='Sort the keys of the output YAML.', - show_default=True, -) +@arguments.OUTPUT_FILE(type=click.Path(exists=False, path_type=pathlib.Path), required=False) +@options.OVERWRITE() +@options.SORT() @with_dbenv() -def export(code, output_file, sort): +def export(code, output_file, overwrite, sort): """Export code to a yaml file.""" + import yaml code_data = {} for key in code.Model.model_fields.keys(): - if key == 'computer': - value = getattr(code, key).label - else: - value = getattr(code, key) + value = getattr(code, key).label if key == 'computer' else getattr(code, key) # If the attribute is not set, for example ``with_mpi`` do not export it, because the YAML won't be valid for # use in ``verdi code create`` since ``None`` is not a valid value on the CLI. 
if value is not None: code_data[key] = str(value) - with open(output_file, 'w', encoding='utf-8') as yfhandle: - yaml.dump(code_data, yfhandle, sort_keys=sort) + try: + output_file = generate_validate_output_file( + output_file=output_file, entity_label=code.label, overwrite=overwrite, appendix=f'@{code_data["computer"]}' + ) + except (FileExistsError, IsADirectoryError) as exception: + raise click.BadParameter(str(exception), param_hint='OUTPUT_FILE') from exception + + output_file.write_text(yaml.dump(code_data, sort_keys=sort)) + + echo.echo_success(f'Code<{code.pk}> {code.label} exported to file `{output_file}`.') @verdi_code.command() diff --git a/src/aiida/cmdline/commands/cmd_computer.py b/src/aiida/cmdline/commands/cmd_computer.py index 7f8508b77a..acb9c2da81 100644 --- a/src/aiida/cmdline/commands/cmd_computer.py +++ b/src/aiida/cmdline/commands/cmd_computer.py @@ -20,6 +20,7 @@ from aiida.cmdline.params import arguments, options from aiida.cmdline.params.options.commands import computer as options_computer from aiida.cmdline.utils import echo, echo_tabulate +from aiida.cmdline.utils.common import generate_validate_output_file from aiida.cmdline.utils.decorators import with_dbenv from aiida.common.exceptions import EntryPointError, ValidationError from aiida.plugins.entry_point import get_entry_point_names @@ -741,16 +742,11 @@ def computer_export(): @computer_export.command('setup') @arguments.COMPUTER() -@arguments.OUTPUT_FILE(type=click.Path(exists=False, path_type=pathlib.Path)) -@click.option( - '--sort/--no-sort', - is_flag=True, - default=True, - help='Sort the keys of the output YAML.', - show_default=True, -) +@arguments.OUTPUT_FILE(type=click.Path(exists=False, path_type=pathlib.Path), required=False) +@options.OVERWRITE() +@options.SORT() @with_dbenv() -def computer_export_setup(computer, output_file, sort): +def computer_export_setup(computer, output_file, overwrite, sort): """Export computer setup to a YAML file.""" import yaml @@ -769,6 +765,14 @@ def computer_export_setup(computer, output_file, sort): 'prepend_text': computer.get_prepend_text(), 'append_text': computer.get_append_text(), } + + try: + output_file = generate_validate_output_file( + output_file=output_file, entity_label=computer.label, overwrite=overwrite, appendix='-setup' + ) + except (FileExistsError, IsADirectoryError) as exception: + raise click.BadParameter(str(exception), param_hint='OUTPUT_FILE') from exception + try: output_file.write_text(yaml.dump(computer_setup, sort_keys=sort), 'utf-8') except Exception as e: @@ -783,19 +787,14 @@ def computer_export_setup(computer, output_file, sort): @computer_export.command('config') @arguments.COMPUTER() -@arguments.OUTPUT_FILE(type=click.Path(exists=False, path_type=pathlib.Path)) +@arguments.OUTPUT_FILE(type=click.Path(exists=False, path_type=pathlib.Path), required=False) @options.USER( help='Email address of the AiiDA user from whom to export this computer (if different from default user).' 
) -@click.option( - '--sort/--no-sort', - is_flag=True, - default=True, - help='Sort the keys of the output YAML.', - show_default=True, -) +@options.OVERWRITE() +@options.SORT() @with_dbenv() -def computer_export_config(computer, output_file, user, sort): +def computer_export_config(computer, output_file, user, overwrite, sort): """Export computer transport configuration for a user to a YAML file.""" import yaml @@ -804,20 +803,29 @@ def computer_export_config(computer, output_file, user, sort): f'Computer<{computer.pk}> {computer.label} configuration cannot be exported,' ' because computer has not been configured yet.' ) + else: + try: + output_file = generate_validate_output_file( + output_file=output_file, entity_label=computer.label, overwrite=overwrite, appendix='-config' + ) + except (FileExistsError, IsADirectoryError) as exception: + raise click.BadParameter(str(exception), param_hint='OUTPUT_FILE') from exception + try: computer_configuration = computer.get_configuration(user) output_file.write_text(yaml.dump(computer_configuration, sort_keys=sort), 'utf-8') - except Exception as e: + + except Exception as exception: error_traceback = traceback.format_exc() echo.CMDLINE_LOGGER.debug(error_traceback) if user is None: echo.echo_critical( - f'Unexpected error while exporting configuration for Computer<{computer.pk}> {computer.label}: {e!s}.' + f'Unexpected error while exporting configuration for Computer<{computer.pk}> {computer.label}: {exception!s}.' # noqa: E501 ) else: echo.echo_critical( f'Unexpected error while exporting configuration for Computer<{computer.pk}> {computer.label}' - f' and User<{user.pk}> {user.email}: {e!s}.' + f' and User<{user.pk}> {user.email}: {exception!s}.' ) else: - echo.echo_success(f"Computer<{computer.pk}> {computer.label} configuration exported to file '{output_file}'.") + echo.echo_success(f'Computer<{computer.pk}> {computer.label} configuration exported to file `{output_file}`.') diff --git a/src/aiida/cmdline/params/options/__init__.py b/src/aiida/cmdline/params/options/__init__.py index 065efe4223..ea4be61461 100644 --- a/src/aiida/cmdline/params/options/__init__.py +++ b/src/aiida/cmdline/params/options/__init__.py @@ -92,6 +92,7 @@ 'REPOSITORY_PATH', 'SCHEDULER', 'SILENT', + 'SORT', 'TIMEOUT', 'TRAJECTORY_INDEX', 'TRANSPORT', diff --git a/src/aiida/cmdline/params/options/main.py b/src/aiida/cmdline/params/options/main.py index f5eb2d551f..d521828450 100644 --- a/src/aiida/cmdline/params/options/main.py +++ b/src/aiida/cmdline/params/options/main.py @@ -96,6 +96,7 @@ 'REPOSITORY_PATH', 'SCHEDULER', 'SILENT', + 'SORT', 'TIMEOUT', 'TRAJECTORY_INDEX', 'TRANSPORT', @@ -771,3 +772,12 @@ def set_log_level(ctx, _param, value): show_default=True, help='Overwrite file/directory if writing to disk.', ) + +SORT = OverridableOption( + '--sort/--no-sort', + 'sort', + is_flag=True, + default=True, + help='Sort the keys of the output YAML.', + show_default=True, +) diff --git a/src/aiida/cmdline/utils/common.py b/src/aiida/cmdline/utils/common.py index 53420fd33b..d410b33d91 100644 --- a/src/aiida/cmdline/utils/common.py +++ b/src/aiida/cmdline/utils/common.py @@ -8,10 +8,13 @@ ########################################################################### """Common utility functions for command line commands.""" +from __future__ import annotations + import logging import os import sys import textwrap +from pathlib import Path from typing import TYPE_CHECKING from click import style @@ -481,3 +484,22 @@ def build_entries(ports): echo.echo(tabulate(table, 
tablefmt='plain'))

     echo.echo(style('\nExit codes that invalidate the cache are marked in bold red.\n', italic=True))
+
+
+def generate_validate_output_file(
+    output_file: Path | None, entity_label: str, appendix: str = '', overwrite: bool = False
+):
+    """Generate a default output filename for `Code`/`Computer` export and validate it."""
+
+    if output_file is None:
+        output_file = Path(f'{entity_label}{appendix}.yml')
+
+    if output_file.is_dir():
+        raise IsADirectoryError(
+            f'A directory with the name `{output_file.resolve()}` already exists. Remove manually and try again.'
+        )
+
+    if output_file.is_file() and not overwrite:
+        raise FileExistsError(f'File `{output_file}` already exists, use `--overwrite` to overwrite.')
+
+    return output_file
diff --git a/tests/cmdline/commands/test_code.py b/tests/cmdline/commands/test_code.py
index 8aeeb5cec8..b7d1c5cf5f 100644
--- a/tests/cmdline/commands/test_code.py
+++ b/tests/cmdline/commands/test_code.py
@@ -259,8 +259,8 @@ def test_code_duplicate_ignore(run_cli_command, aiida_code_installed, non_intera


 @pytest.mark.usefixtures('aiida_profile_clean')
-@pytest.mark.parametrize('sort', (True, False))
-def test_code_export(run_cli_command, aiida_code_installed, tmp_path, file_regression, sort):
+@pytest.mark.parametrize('sort_option', ('--sort', '--no-sort'))
+def test_code_export(run_cli_command, aiida_code_installed, tmp_path, file_regression, sort_option):
     """Test export the code setup to str."""
     prepend_text = 'module load something\n some command'
     code = aiida_code_installed(
@@ -271,14 +271,11 @@ def test_code_export(run_cli_command, aiida_code_installed, tmp_path, file_regre
     )
     filepath = tmp_path / 'code.yml'

-    options = [str(code.pk), str(filepath)]
-    options.append('--sort' if sort else '--no-sort')
-
-    run_cli_command(cmd_code.export, options)
-
+    options = [str(code.pk), str(filepath), sort_option]
+    result = run_cli_command(cmd_code.export, options)
+    assert str(filepath) in result.output, 'Filename should be in terminal output but was not found.'
# file regression check
-    with open(filepath, 'r', encoding='utf-8') as fhandle:
-        content = fhandle.read()
+    content = filepath.read_text()
     file_regression.check(content, extension='.yml')

     # round trip test by create code from the config file
@@ -292,6 +289,65 @@ def test_code_export(run_cli_command, aiida_code_installed, tmp_path, file_regre
     assert isinstance(new_code, InstalledCode)


+@pytest.mark.usefixtures('aiida_profile_clean')
+def test_code_export_overwrite(run_cli_command, aiida_code_installed, tmp_path):
+    prepend_text = 'module load something\n some command'
+    code = aiida_code_installed(
+        default_calc_job_plugin='core.arithmetic.add',
+        filepath_executable='/bin/cat',
+        label='code',
+        prepend_text=prepend_text,
+    )
+    filepath = tmp_path / 'code.yml'
+
+    options = [str(code.pk), str(filepath)]
+
+    # Create a directory with the same name and check that the command fails
+    filepath.mkdir()
+    result = run_cli_command(cmd_code.export, options, raises=True)
+    assert f'A directory with the name `{filepath}` already exists' in result.output
+    filepath.rmdir()
+
+    # Export fails if the file already exists and overwrite is set to False
+    filepath.touch()
+    result = run_cli_command(cmd_code.export, options, raises=True)
+    assert f'File `{filepath}` already exists' in result.output
+
+    # Check that overwrite actually overwrites the exported Code config with the new data
+    code_echo = aiida_code_installed(
+        default_calc_job_plugin='core.arithmetic.add',
+        filepath_executable='/bin/echo',
+        # Need to set a different label, therefore manually specify the same output filename
+        label='code_echo',
+        prepend_text=prepend_text,
+    )
+
+    options = [str(code_echo.pk), str(filepath), '--overwrite']
+    run_cli_command(cmd_code.export, options)
+
+    content = filepath.read_text()
+    assert '/bin/echo' in content
+
+
+@pytest.mark.usefixtures('aiida_profile_clean')
+@pytest.mark.usefixtures('chdir_tmp_path')
+def test_code_export_default_filename(run_cli_command, aiida_code_installed):
+    """Test that the default filename is created if no argument is passed."""
+
+    prepend_text = 'module load something\n some command'
+    code = aiida_code_installed(
+        default_calc_job_plugin='core.arithmetic.add',
+        filepath_executable='/bin/cat',
+        label='code',
+        prepend_text=prepend_text,
+    )
+
+    options = [str(code.pk)]
+    run_cli_command(cmd_code.export, options)
+
+    assert pathlib.Path('code@localhost.yml').is_file()
+
+
 @pytest.mark.parametrize('non_interactive_editor', ('vim -cwq',), indirect=True)
 def test_from_config_local_file(non_interactive_editor, run_cli_command, aiida_localhost):
     """Test setting up a code from a config file on disk."""
diff --git a/tests/cmdline/commands/test_code/test_code_export_True_.yml b/tests/cmdline/commands/test_code/test_code_export_True_.yml
deleted file mode 100644
index 640717a1d2..0000000000
--- a/tests/cmdline/commands/test_code/test_code_export_True_.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-append_text: ''
-computer: localhost
-default_calc_job_plugin: core.arithmetic.add
-description: ''
-filepath_executable: /bin/cat
-label: code
-prepend_text: "module load something\n some command"
-use_double_quotes: 'False'
diff --git a/tests/cmdline/commands/test_code/test_code_export_False_.yml b/tests/cmdline/commands/test_code/test_code_export___no_sort_.yml
similarity index 100%
rename from tests/cmdline/commands/test_code/test_code_export_False_.yml
rename to tests/cmdline/commands/test_code/test_code_export___no_sort_.yml
diff --git a/tests/cmdline/commands/test_code/test_code_export.yml
b/tests/cmdline/commands/test_code/test_code_export___sort_.yml
similarity index 100%
rename from tests/cmdline/commands/test_code/test_code_export.yml
rename to tests/cmdline/commands/test_code/test_code_export___sort_.yml
diff --git a/tests/cmdline/commands/test_computer.py b/tests/cmdline/commands/test_computer.py
index 128a3bd61f..dac1170770 100644
--- a/tests/cmdline/commands/test_computer.py
+++ b/tests/cmdline/commands/test_computer.py
@@ -9,6 +9,7 @@
 """Tests for the 'verdi computer' command."""

 import os
+import pathlib
 import tempfile
 import textwrap
 from collections import OrderedDict
@@ -515,69 +516,167 @@ def test_show(self):
         assert '--username=' in result.output
         assert result_cur.output == result.output

-    @pytest.mark.parametrize('sort', ['--sort', '--no-sort'])
-    def test_computer_export_setup(self, tmp_path, sort):
-        """Test if 'verdi computer export setup' command works"""
-        self.comp_builder.label = 'test_computer_export_setup' + sort
+    @pytest.mark.parametrize('sort_option', ('--sort', '--no-sort'))
+    def test_computer_export_setup(self, tmp_path, file_regression, sort_option):
+        """Test that the `verdi computer export setup` command works."""
+        self.comp_builder.label = f'test_computer_export_setup{sort_option}'
+        # Label needs to be unique during parametrization
         self.comp_builder.transport = 'core.ssh'
         comp = self.comp_builder.new()
         comp.store()

         exported_setup_filename = tmp_path / 'computer-setup.yml'
-        result = self.cli_runner(computer_export_setup, [sort, comp.label, exported_setup_filename])
-        assert result.exit_code == 0, 'Command should have run successfull.'
+
+        # Successful write behavior
+        result = self.cli_runner(computer_export_setup, [comp.label, exported_setup_filename, sort_option])
         assert str(exported_setup_filename) in result.output, 'Filename should be in terminal output but was not found.'
         assert exported_setup_filename.exists(), f"'{exported_setup_filename}' was not created during export."
+
+        # file regression check
+        content = exported_setup_filename.read_text()
+        file_regression.check(content, extension='.yml')
+
         # verifying correctness by comparing internal and loaded yml object
         configure_setup_data = yaml.safe_load(exported_setup_filename.read_text())
         assert configure_setup_data == self.comp_builder.get_computer_spec(
             comp
         ), 'Internal computer configuration does not agree with exported one.'
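
The default-filename and overwrite checks in the computer export tests that
follow, like their code counterparts above, all go through the
`generate_validate_output_file` helper added to `aiida.cmdline.utils.common`
in this patch. A minimal usage sketch (assuming an empty working directory;
the label and appendix are illustrative):

    from pathlib import Path

    from aiida.cmdline.utils.common import generate_validate_output_file

    # With no explicit output file, the default is '<entity_label><appendix>.yml'.
    path = generate_validate_output_file(output_file=None, entity_label='code', appendix='@localhost')
    assert path == Path('code@localhost.yml')

    # If the target already exists, the helper raises FileExistsError unless overwrite=True is passed.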
+    def test_computer_export_setup_overwrite(self, tmp_path):
+        """Test that the overwriting behavior of the `verdi computer export setup` command works as expected."""
+
+        self.comp_builder.label = 'test_computer_export_setup'
+        self.comp_builder.transport = 'core.ssh'
+        comp = self.comp_builder.new()
+        comp.store()
+
+        exported_setup_filename = tmp_path / 'computer-setup.yml'
+        # Check that export fails if the file already exists
+        exported_setup_filename.touch()
+        result = self.cli_runner(computer_export_setup, [comp.label, exported_setup_filename], raises=True)
+        assert 'already exists, use `--overwrite`' in result.output
+
+        # Create a new instance and check that the change is reflected in the new YAML file output
+        self.comp_builder.label = 'test_computer_export_setup_local'
+        self.comp_builder.transport = 'core.local'
+        comp_local = self.comp_builder.new()
+        comp_local.store()
+        result = self.cli_runner(computer_export_setup, [comp_local.label, exported_setup_filename, '--overwrite'])
+        content = exported_setup_filename.read_text()
+        assert 'core.local' in content
+
         # we create a directory so we raise an error when exporting with the same name
-        # to test the except part of the function
-        already_existing_filename = tmp_path / 'tmp_dir'
-        already_existing_filename.mkdir()
-        result = self.cli_runner(computer_export_setup, [sort, comp.label, already_existing_filename], raises=True)
-        assert result.exit_code == ExitCode.CRITICAL
+        already_existing_directory = tmp_path / 'tmp_dir'
+        already_existing_directory.mkdir()
+        result = self.cli_runner(computer_export_setup, [comp.label, already_existing_directory], raises=True)
+        assert f'A directory with the name `{already_existing_directory}` already exists.' in result.output
+
+    @pytest.mark.usefixtures('chdir_tmp_path')
+    def test_computer_export_setup_default_filename(self):
+        """Test that default filename is as expected when not specified for `verdi computer export setup`."""
+        comp_label = 'test_computer_export_setup_default'
+        self.comp_builder.label = comp_label
+        # Label needs to be unique during parametrization
+        self.comp_builder.transport = 'core.ssh'
+        comp = self.comp_builder.new()
+        comp.store()

-    @pytest.mark.parametrize('sort', ['--sort', '--no-sort'])
-    def test_computer_export_config(self, tmp_path, sort):
+        exported_setup_filename = f'{comp_label}-setup.yml'
+
+        self.cli_runner(computer_export_setup, [comp.label])
+        assert pathlib.Path(exported_setup_filename).is_file()
+
+    def test_computer_export_config(self, tmp_path):
         """Test if 'verdi computer export config' command works"""
-        self.comp_builder.label = 'test_computer_export_config' + sort
+        self.comp_builder.label = 'test_computer_export_config'
         self.comp_builder.transport = 'core.ssh'
         comp = self.comp_builder.new()
         comp.store()

         exported_config_filename = tmp_path / 'computer-configure.yml'
+
         # We have not configured the computer yet so it should exit with a critical error
         result = self.cli_runner(computer_export_config, [comp.label, exported_config_filename], raises=True)
         assert result.exit_code == ExitCode.CRITICAL

         comp.configure(safe_interval=0.0)
+        comp.configure(username='aiida')
+
+        # Write sorted output file
         result = self.cli_runner(computer_export_config, [comp.label, exported_config_filename])
         assert 'Success' in result.output, 'Command should have run successfully.'
         assert (
             str(exported_config_filename) in result.output
         ), 'Filename should be in terminal output but was not found.'
assert exported_config_filename.exists(), f"'{exported_config_filename}' was not created during export."
+
+        content = exported_config_filename.read_text()
+        assert content.startswith('safe_interval: 0.0')
+
         # verifying correctness by comparing internal and loaded yml object
         configure_config_data = yaml.safe_load(exported_config_filename.read_text())
         assert (
             configure_config_data == comp.get_configuration()
         ), 'Internal computer configuration does not agree with exported one.'

-        # we create a directory so we raise an error when exporting with the same name
-        # to test the except part of the function
-        already_existing_filename = tmp_path / 'tmp_dir'
-        already_existing_filename.mkdir()
-        result = self.cli_runner(computer_export_config, [comp.label, already_existing_filename], raises=True)
-        assert result.exit_code == ExitCode.CRITICAL
+        # Check that unsorted output file creation works as expected
+        exported_config_filename.unlink()
+        result = self.cli_runner(computer_export_config, [comp.label, exported_config_filename, '--no-sort'])
+        assert 'Success' in result.output, 'Command should have run successfully.'
+        assert (
+            str(exported_config_filename) in result.output
+        ), 'Filename should be in terminal output but was not found.'
+        assert exported_config_filename.exists(), f"'{exported_config_filename}' was not created during export."

-        result = self.cli_runner(
-            computer_export_config, ['--user', self.user.email, comp.label, already_existing_filename], raises=True
-        )
-        assert result.exit_code == ExitCode.CRITICAL
+        # Check contents
+        content = exported_config_filename.read_text()
+        assert 'username: aiida' in content, 'username not in output YAML'
+        assert 'safe_interval: 0.0' in content, 'safe_interval not in output YAML'
+
+    def test_computer_export_config_overwrite(self, tmp_path):
+        """Test that the overwrite behavior of the `verdi computer export config` command works."""
+        self.comp_builder.label = 'test_computer_export_config_overwrite'
+        self.comp_builder.transport = 'core.ssh'
+        comp = self.comp_builder.new()
+        comp.store()
+        comp.configure(safe_interval=0.0)
+
+        exported_config_filename = tmp_path / 'computer-configure.yml'
+
+        # Create a directory with the same name and check that the command fails
+        exported_config_filename.mkdir()
+        result = self.cli_runner(computer_export_config, [comp.label, exported_config_filename], raises=True)
+        assert f'A directory with the name `{exported_config_filename}` already exists' in result.output
+        exported_config_filename.rmdir()
+
+        # Check that export fails if the file already exists
+        exported_config_filename.touch()
+        result = self.cli_runner(computer_export_config, [comp.label, exported_config_filename], raises=True)
+        assert 'already exists, use `--overwrite`' in result.output
+
+        # Create a new instance and check that the change is reflected in the overwritten YAML output file
+        self.comp_builder.label = 'test_computer_export_config_0'
+        comp_mod = self.comp_builder.new()
+        comp_mod.store()
+        comp_mod.configure(safe_interval=1.0)
+        self.cli_runner(computer_export_config, [comp_mod.label, exported_config_filename, '--overwrite'])
+        content = exported_config_filename.read_text()
+        assert 'safe_interval: 1.0' in content
+
+    @pytest.mark.usefixtures('chdir_tmp_path')
+    def test_computer_export_config_default_filename(self):
+        """Test that default filename is as expected when not specified for `verdi computer export config`."""
+        comp_label = 'test_computer_export_config_default'
+        self.comp_builder.label = comp_label
+        self.comp_builder.transport = 'core.ssh'
+        comp = self.comp_builder.new()
+        comp.store()
+        comp.configure(safe_interval=0.0)
+
+        exported_config_filename = f'{comp_label}-config.yml'
+
+        self.cli_runner(computer_export_config, [comp.label])
+        assert pathlib.Path(exported_config_filename).is_file()
 
 
 class TestVerdiComputerCommands:
diff --git a/tests/cmdline/commands/test_computer/test_computer_export_setup___no_sort_.yml b/tests/cmdline/commands/test_computer/test_computer_export_setup___no_sort_.yml
new file mode 100644
index 0000000000..7fc3ce33fd
--- /dev/null
+++ b/tests/cmdline/commands/test_computer/test_computer_export_setup___no_sort_.yml
@@ -0,0 +1,13 @@
+label: test_computer_export_setup--no-sort
+hostname: localhost
+description: Test Computer
+transport: core.ssh
+scheduler: core.direct
+shebang: '#!xonsh'
+work_dir: /tmp/aiida
+mpirun_command: mpirun
+mpiprocs_per_machine: 8
+default_memory_per_machine: 100000
+use_double_quotes: false
+prepend_text: ''
+append_text: ''
diff --git a/tests/cmdline/commands/test_computer/test_computer_export_setup___sort_.yml b/tests/cmdline/commands/test_computer/test_computer_export_setup___sort_.yml
new file mode 100644
index 0000000000..a1c7f6d9cc
--- /dev/null
+++ b/tests/cmdline/commands/test_computer/test_computer_export_setup___sort_.yml
@@ -0,0 +1,13 @@
+append_text: ''
+default_memory_per_machine: 100000
+description: Test Computer
+hostname: localhost
+label: test_computer_export_setup--sort
+mpiprocs_per_machine: 8
+mpirun_command: mpirun
+prepend_text: ''
+scheduler: core.direct
+shebang: '#!xonsh'
+transport: core.ssh
+use_double_quotes: false
+work_dir: /tmp/aiida
diff --git a/tests/cmdline/utils/test_common.py b/tests/cmdline/utils/test_common.py
index 863f17d7a4..69a01090df 100644
--- a/tests/cmdline/utils/test_common.py
+++ b/tests/cmdline/utils/test_common.py
@@ -8,7 +8,11 @@
 ###########################################################################
 """Tests for the :mod:`aiida.cmdline.utils.common` module."""
 
+from pathlib import Path
+
+import pytest
 from aiida.cmdline.utils import common
+from aiida.cmdline.utils.common import generate_validate_output_file
 from aiida.common import LinkType
 from aiida.engine import Process, calcfunction
 from aiida.orm import CalcFunctionNode, CalculationNode, WorkflowNode
@@ -88,3 +92,38 @@ def test_with_docstring():
     common.print_process_info(TestProcessWithDocstring)
     common.print_process_info(test_without_docstring)
     common.print_process_info(test_with_docstring)
+
+
+@pytest.mark.usefixtures('chdir_tmp_path')
+def test_generate_validate_output():
+    test_entity_label = 'test_code'
+    test_appendix = '@test_computer'
+
+    expected_output_file = Path(f'{test_entity_label}{test_appendix}.yml')
+
+    # Test default label creation
+    obtained_output_file = generate_validate_output_file(
+        output_file=None, entity_label=test_entity_label, appendix=test_appendix
+    )
+    assert expected_output_file == obtained_output_file, 'Filenames differ'
+
+    # Test failure if file exists, but overwrite False
+    expected_output_file.touch()
+    with pytest.raises(FileExistsError, match='.*use `--overwrite` to overwrite.'):
+        generate_validate_output_file(
+            output_file=None, entity_label=test_entity_label, appendix=test_appendix, overwrite=False
+        )
+
+    # Test that overwrite does the job
+    obtained_output_file = generate_validate_output_file(
+        output_file=None, entity_label=test_entity_label, appendix=test_appendix, overwrite=True
+    )
+    assert expected_output_file == obtained_output_file, 'Overwrite unsuccessful'
+    expected_output_file.unlink()
+
+    # Test failure if directory exists
+    expected_output_file.mkdir()
+    with pytest.raises(IsADirectoryError, match='A directory with the name.*'):
+        generate_validate_output_file(
+            output_file=None, entity_label=test_entity_label, appendix=test_appendix, overwrite=False
+        )
diff --git a/tests/tools/dumping/test_processes.py b/tests/tools/dumping/test_processes.py
index aab1a48abb..82e704f4e2 100644
--- a/tests/tools/dumping/test_processes.py
+++ b/tests/tools/dumping/test_processes.py
@@ -302,9 +302,8 @@ def test_dump_calculation_add(tmp_path, generate_calculation_node_add):
 
 
 # Tests for helper methods
-def test_validate_make_dump_path(chdir_tmp_path, tmp_path):
-    chdir_tmp_path
-
+@pytest.mark.usefixtures('chdir_tmp_path')
+def test_validate_make_dump_path(tmp_path):
     safeguard_file = node_metadata_file
 
     # Path must be provided

From 120c8ac6dcd15cec1ff3260ab65276c60027dd5f Mon Sep 17 00:00:00 2001
From: Ali Khosravi
Date: Wed, 10 Jul 2024 14:55:51 +0200
Subject: [PATCH 42/42] CLI: Catch `NotImplementedError` in `verdi calcjob
 gotocomputer` (#6525)

Not all transport plugins implement the `gotocomputer` method. Instead of
raising an exception, the command now displays the remote working directory.
---
 src/aiida/cmdline/commands/cmd_calcjob.py | 10 ++++--
 tests/cmdline/commands/test_calcjob.py    | 41 +++++++++++++++++++++++
 2 files changed, 48 insertions(+), 3 deletions(-)

diff --git a/src/aiida/cmdline/commands/cmd_calcjob.py b/src/aiida/cmdline/commands/cmd_calcjob.py
index 9b34f50100..376301247e 100644
--- a/src/aiida/cmdline/commands/cmd_calcjob.py
+++ b/src/aiida/cmdline/commands/cmd_calcjob.py
@@ -43,9 +43,13 @@ def calcjob_gotocomputer(calcjob):
     if not remote_workdir:
         echo.echo_critical('no remote work directory for this calcjob, maybe the daemon did not submit it yet')
 
-    command = transport.gotocomputer_command(remote_workdir)
-    echo.echo_report('going to the remote work directory...')
-    os.system(command)
+    try:
+        command = transport.gotocomputer_command(remote_workdir)
+        echo.echo_report('going to the remote work directory...')
+        os.system(command)
+    except NotImplementedError:
+        echo.echo_report(f'gotocomputer is not implemented for {transport}')
+        echo.echo_report(f'remote work directory is {remote_workdir}')
 
 
 @verdi_calcjob.command('res')
diff --git a/tests/cmdline/commands/test_calcjob.py b/tests/cmdline/commands/test_calcjob.py
index 9fa6467d7f..a9ca84a991 100644
--- a/tests/cmdline/commands/test_calcjob.py
+++ b/tests/cmdline/commands/test_calcjob.py
@@ -356,3 +356,44 @@ def test_calcjob_remotecat(self):
         options = [str(self.result_job.uuid), 'fileA.txt']
         result = self.cli_runner.invoke(command.calcjob_remotecat, options)
         assert result.stdout == 'test stringA'
+
+    def test_calcjob_gotocomputer(self):
+        """Test `verdi calcjob gotocomputer`."""
+
+        from unittest.mock import patch
+
+        from aiida.common.exceptions import NotExistent
+
+        options = [str(self.result_job.uuid)]
+
+        # Happy path: the command runs without raising an exception
+        with patch('os.system') as mock_os_system:
+            result = self.cli_runner.invoke(command.calcjob_gotocomputer, options)
+            mock_os_system.assert_called_once()
+            assert mock_os_system.call_args[0][0] is not None
+
+        def raise_(e):
+            raise e('something')
+
+        # Test when get_transport raises NotExistent
+        with patch(
+            'aiida.orm.nodes.process.calculation.calcjob.CalcJobNode.get_transport', new=lambda _: raise_(NotExistent)
+        ):
+            result = self.cli_runner.invoke(command.calcjob_gotocomputer, options)
+            assert result.exit_code == 1
+            assert 'something' in result.output
+
+        # Test when get_remote_workdir returns None
+        with patch('aiida.orm.nodes.process.calculation.calcjob.CalcJobNode.get_remote_workdir', new=lambda _: None):
+            result = self.cli_runner.invoke(command.calcjob_gotocomputer, options)
+            assert result.exit_code == 1
+            assert 'no remote work directory for this calcjob' in result.output
+
+        # Test when gotocomputer_command raises NotImplementedError
+        with patch(
+            'aiida.transports.plugins.local.LocalTransport.gotocomputer_command',
+            new=lambda _, __: raise_(NotImplementedError),
+        ):
+            result = self.cli_runner.invoke(command.calcjob_gotocomputer, options)
+            assert result.exit_code == 0
+            assert self.result_job.get_remote_workdir() in result.output
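
The fallback introduced in this last patch reduces to a small standalone
pattern: attempt to build the interactive shell command first, and fall back
to reporting the directory when the transport cannot provide one. The sketch
below is illustrative only, not part of the patch: `DummyTransport` and the
`report` helper are hypothetical stand-ins for an AiiDA transport plugin and
`echo.echo_report`; only the `try`/`except NotImplementedError` structure
mirrors the actual change to `calcjob_gotocomputer`.

import os


class DummyTransport:
    """Hypothetical transport plugin without `gotocomputer` support."""

    def gotocomputer_command(self, remotedir):
        # Transports that cannot open an interactive remote shell raise
        # NotImplementedError, which the command below catches.
        raise NotImplementedError

    def __str__(self):
        return 'DummyTransport'


def report(message):
    """Stand-in for `echo.echo_report`."""
    print(f'Report: {message}')


def gotocomputer(transport, remote_workdir):
    """Open a shell in the remote work directory, or fall back to printing it."""
    try:
        # Build the command before printing anything: if the transport does
        # not support it, the happy-path output is never started and we fall
        # through to the fallback messages.
        command = transport.gotocomputer_command(remote_workdir)
        report('going to the remote work directory...')
        os.system(command)
    except NotImplementedError:
        report(f'gotocomputer is not implemented for {transport}')
        report(f'remote work directory is {remote_workdir}')


if __name__ == '__main__':
    # Prints the two fallback report lines instead of raising:
    gotocomputer(DummyTransport(), '/scratch/aiida/run')

Because `gotocomputer_command` is called inside the `try` block before any
output is produced, an unsupported transport yields only the two report lines
and a zero exit code, which is exactly what the final test above asserts.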