From 0b2fb318b3d2b38fad5325c6687e1701dbfbcc02 Mon Sep 17 00:00:00 2001 From: theissenhelen Date: Wed, 31 Jul 2024 15:54:17 +0000 Subject: [PATCH 01/16] feat: add configurability to dropout in MultiHeadSelfAttention Co-authored-by: Rilwan (Akanni) Adewoyin <18564167+Rilwan-Adewoyin@users.noreply.github.com> --- src/anemoi/models/layers/attention.py | 15 +++++++++++---- src/anemoi/models/layers/block.py | 12 ++++++++++-- src/anemoi/models/layers/chunk.py | 4 ++++ src/anemoi/models/layers/processor.py | 4 ++++ 4 files changed, 29 insertions(+), 6 deletions(-) diff --git a/src/anemoi/models/layers/attention.py b/src/anemoi/models/layers/attention.py index 2063ad0..931e098 100644 --- a/src/anemoi/models/layers/attention.py +++ b/src/anemoi/models/layers/attention.py @@ -40,7 +40,7 @@ def __init__( bias: bool = False, is_causal: bool = False, window_size: Optional[int] = None, - dropout: float = 0.0, + dropout_p: float = 0.0, ): super().__init__() @@ -48,11 +48,11 @@ def __init__( embed_dim % num_heads == 0 ), f"Embedding dimension ({embed_dim}) must be divisible by number of heads ({num_heads})" - self.dropout = dropout self.num_heads = num_heads self.embed_dim = embed_dim self.head_dim = embed_dim // num_heads # q k v self.window_size = (window_size, window_size) # flash attention + self.dropout_p = dropout_p self.is_causal = is_causal self.lin_qkv = nn.Linear(embed_dim, 3 * embed_dim, bias=bias) @@ -86,15 +86,22 @@ def forward( query = shard_heads(query, shapes=shapes, mgroup=model_comm_group) key = shard_heads(key, shapes=shapes, mgroup=model_comm_group) value = shard_heads(value, shapes=shapes, mgroup=model_comm_group) + dropout_p = self.dropout_p if self.training else 0.0 if _FLASH_ATTENTION_AVAILABLE: query, key, value = ( einops.rearrange(t, "batch heads grid vars -> batch grid heads vars") for t in (query, key, value) ) - out = self.attention(query, key, value, causal=False, window_size=self.window_size) + out = self.attention(query, key, value, causal=False, window_size=self.window_size, dropout_p=dropout_p) out = einops.rearrange(out, "batch grid heads vars -> batch heads grid vars") else: - out = self.attention(query, key, value, is_causal=False) # expects (batch heads grid variable) format + out = self.attention( + query, + key, + value, + is_causal=False, + dropout_p=dropout_p, + ) # expects (batch heads grid variable) format out = shard_sequence(out, shapes=shapes, mgroup=model_comm_group) out = einops.rearrange(out, "batch heads grid vars -> (batch grid) (heads vars)") diff --git a/src/anemoi/models/layers/block.py b/src/anemoi/models/layers/block.py index ba29607..7fd3627 100644 --- a/src/anemoi/models/layers/block.py +++ b/src/anemoi/models/layers/block.py @@ -55,7 +55,15 @@ def forward( class TransformerProcessorBlock(BaseBlock): """Transformer block with MultiHeadSelfAttention and MLPs.""" - def __init__(self, num_channels, hidden_dim, num_heads, activation, window_size: int): + def __init__( + self, + num_channels: int, + hidden_dim: int, + num_heads: int, + activation: str, + window_size: int, + dropout_p: float = 0.0, + ): super().__init__() try: @@ -72,7 +80,7 @@ def __init__(self, num_channels, hidden_dim, num_heads, activation, window_size: window_size=window_size, bias=False, is_causal=False, - dropout=0.0, + dropout_p=dropout_p, ) self.mlp = nn.Sequential( diff --git a/src/anemoi/models/layers/chunk.py b/src/anemoi/models/layers/chunk.py index 61dec34..87d0ac7 100644 --- a/src/anemoi/models/layers/chunk.py +++ b/src/anemoi/models/layers/chunk.py @@ -73,6 +73,7 @@ def 
__init__(
        num_heads: int = 16,
        mlp_hidden_ratio: int = 4,
        activation: str = "GELU",
+        dropout_p: float = 0.0,
    ) -> None:
        """Initialize TransformerProcessor.

@@ -88,6 +89,8 @@ def __init__(
            ratio of mlp hidden dimension to embedding dimension, default 4
        activation : str, optional
            Activation function, by default "GELU"
+        dropout_p: float
+            Dropout probability used for multi-head self attention, default 0.0
        """
        super().__init__(num_channels=num_channels, num_layers=num_layers)

@@ -98,6 +101,7 @@ def __init__(
            num_heads=num_heads,
            activation=activation,
            window_size=window_size,
+            dropout_p=dropout_p,
        )

    def forward(
diff --git a/src/anemoi/models/layers/processor.py b/src/anemoi/models/layers/processor.py
index bb33609..6ba8eb1 100644
--- a/src/anemoi/models/layers/processor.py
+++ b/src/anemoi/models/layers/processor.py
@@ -95,6 +95,7 @@ def __init__(
        cpu_offload: bool = False,
        num_heads: int = 16,
        mlp_hidden_ratio: int = 4,
+        dropout_p: float = 0.1,
        **kwargs,
    ) -> None:
        """Initialize TransformerProcessor.
@@ -113,6 +114,8 @@ def __init__(
            ratio of mlp hidden dimension to embedding dimension, default 4
        activation : str, optional
            Activation function, by default "GELU"
+        dropout_p: float, optional
+            Dropout probability used for multi-head self attention, default 0.1
        """
        super().__init__(
            num_channels=num_channels,
@@ -133,6 +136,7 @@ def __init__(
            num_layers=self.chunk_size,
            window_size=window_size,
            activation=activation,
+            dropout_p=dropout_p,
        )

        self.offload_layers(cpu_offload)

From 7a0e87161534c7b40b8af70ef66acbce8e2fa531 Mon Sep 17 00:00:00 2001
From: theissenhelen
Date: Wed, 31 Jul 2024 15:59:04 +0000
Subject: [PATCH 02/16] test: adjust to dropout_p

---
 tests/layers/block/test_block_transformer.py | 13 ++++++++++---
 tests/layers/chunk/test_chunk_transformer.py | 4 ++++
 .../processor/test_transformer_processor.py  | 6 ++++++
 tests/layers/test_attention.py               | 16 ++++++++++------
 4 files changed, 30 insertions(+), 9 deletions(-)

diff --git a/tests/layers/block/test_block_transformer.py b/tests/layers/block/test_block_transformer.py
index 97c274f..2e63386 100644
--- a/tests/layers/block/test_block_transformer.py
+++ b/tests/layers/block/test_block_transformer.py
@@ -29,11 +29,14 @@ class TestTransformerProcessorBlock:
        num_heads=st.integers(min_value=1, max_value=10),
        activation=st.sampled_from(["ReLU", "GELU", "Tanh"]),
        window_size=st.integers(min_value=1, max_value=512),
+        dropout_p=st.floats(min_value=0.0, max_value=1.0),
    )
    @settings(max_examples=10)
-    def test_init(self, factor_attention_heads, hidden_dim, num_heads, activation, window_size):
+    def test_init(self, factor_attention_heads, hidden_dim, num_heads, activation, window_size, dropout_p):
        num_channels = num_heads * factor_attention_heads
-        block = TransformerProcessorBlock(num_channels, hidden_dim, num_heads, activation, window_size)
+        block = TransformerProcessorBlock(
+            num_channels, hidden_dim, num_heads, activation, window_size, dropout_p=dropout_p
+        )

        assert isinstance(block, TransformerProcessorBlock)

        assert isinstance(block.layer_norm1, nn.LayerNorm)
@@ -49,6 +52,7 @@ def test_init(self, factor_attention_heads, hidden_dim, num_heads, activation, w
        window_size=st.integers(min_value=1, max_value=512),
        shapes=st.lists(st.integers(min_value=1, max_value=10), min_size=3, max_size=3),
        batch_size=st.integers(min_value=1, max_value=40),
+        dropout_p=st.floats(min_value=0.0, max_value=1.0),
    )
    @settings(max_examples=10)
    def test_forward_output(
        self,
        factor_attention_heads,
        hidden_dim,
        num_heads,
        activation,
        window_size,
        shapes,
        batch_size,
+        dropout_p,
    ):
num_channels = num_heads * factor_attention_heads - block = TransformerProcessorBlock(num_channels, hidden_dim, num_heads, activation, window_size) + block = TransformerProcessorBlock( + num_channels, hidden_dim, num_heads, activation, window_size, dropout_p=dropout_p + ) x = torch.randn((batch_size, num_channels)) diff --git a/tests/layers/chunk/test_chunk_transformer.py b/tests/layers/chunk/test_chunk_transformer.py index 1fe7c6d..5449e97 100644 --- a/tests/layers/chunk/test_chunk_transformer.py +++ b/tests/layers/chunk/test_chunk_transformer.py @@ -20,6 +20,7 @@ def init(self): mlp_hidden_ratio: int = 4 activation: str = "GELU" window_size: int = 13 + dropout_p: float = 0.1 # num_heads must be evenly divisible by num_channels for MHSA return ( @@ -29,6 +30,7 @@ def init(self): mlp_hidden_ratio, activation, window_size, + dropout_p, ) @pytest.fixture @@ -40,6 +42,7 @@ def processor_chunk(self, init): mlp_hidden_ratio, activation, window_size, + dropout_p, ) = init return TransformerProcessorChunk( num_channels=num_channels, @@ -48,6 +51,7 @@ def processor_chunk(self, init): mlp_hidden_ratio=mlp_hidden_ratio, activation=activation, window_size=window_size, + dropout_p=dropout_p, ) def test_all_blocks(self, processor_chunk): diff --git a/tests/layers/processor/test_transformer_processor.py b/tests/layers/processor/test_transformer_processor.py index d359c27..305af41 100644 --- a/tests/layers/processor/test_transformer_processor.py +++ b/tests/layers/processor/test_transformer_processor.py @@ -21,6 +21,7 @@ def transformer_processor_init(): cpu_offload = False num_heads = 16 mlp_hidden_ratio = 4 + dropout_p = 0.1 return ( num_layers, window_size, @@ -30,6 +31,7 @@ def transformer_processor_init(): cpu_offload, num_heads, mlp_hidden_ratio, + dropout_p, ) @@ -44,6 +46,7 @@ def transformer_processor(transformer_processor_init): cpu_offload, num_heads, mlp_hidden_ratio, + dropout_p, ) = transformer_processor_init return TransformerProcessor( num_layers=num_layers, @@ -54,6 +57,7 @@ def transformer_processor(transformer_processor_init): cpu_offload=cpu_offload, num_heads=num_heads, mlp_hidden_ratio=mlp_hidden_ratio, + dropout_p=dropout_p, ) @@ -67,6 +71,7 @@ def test_transformer_processor_init(transformer_processor, transformer_processor _cpu_offload, _num_heads, _mlp_hidden_ratio, + _dropout_p, ) = transformer_processor_init assert isinstance(transformer_processor, TransformerProcessor) assert transformer_processor.num_chunks == num_chunks @@ -84,6 +89,7 @@ def test_transformer_processor_forward(transformer_processor, transformer_proces _cpu_offload, _num_heads, _mlp_hidden_ratio, + _dropout_p, ) = transformer_processor_init gridsize = 100 batch_size = 1 diff --git a/tests/layers/test_attention.py b/tests/layers/test_attention.py index ffeaebc..9457317 100644 --- a/tests/layers/test_attention.py +++ b/tests/layers/test_attention.py @@ -18,17 +18,19 @@ @given( num_heads=st.integers(min_value=1, max_value=50), embed_dim_multiplier=st.integers(min_value=1, max_value=10), + dropout_p=st.floats(min_value=0.0, max_value=1.0), ) -def test_multi_head_self_attention_init(num_heads, embed_dim_multiplier): +def test_multi_head_self_attention_init(num_heads, embed_dim_multiplier, dropout_p): embed_dim = ( num_heads * embed_dim_multiplier ) # TODO: Make assert in MHSA to check if embed_dim is divisible by num_heads - mhsa = MultiHeadSelfAttention(num_heads, embed_dim) + mhsa = MultiHeadSelfAttention(num_heads, embed_dim, dropout_p=dropout_p) assert isinstance(mhsa, nn.Module) assert mhsa.num_heads == 
num_heads assert mhsa.embed_dim == embed_dim assert mhsa.head_dim == embed_dim // num_heads + assert dropout_p == mhsa.dropout_p @pytest.mark.gpu @@ -36,11 +38,12 @@ def test_multi_head_self_attention_init(num_heads, embed_dim_multiplier): batch_size=st.integers(min_value=1, max_value=64), num_heads=st.integers(min_value=1, max_value=20), embed_dim_multiplier=st.integers(min_value=1, max_value=10), + dropout_p=st.floats(min_value=0.0, max_value=1.0), ) @settings(deadline=None) -def test_multi_head_self_attention_forward(batch_size, num_heads, embed_dim_multiplier): +def test_multi_head_self_attention_forward(batch_size, num_heads, embed_dim_multiplier, dropout_p): embed_dim = num_heads * embed_dim_multiplier - mhsa = MultiHeadSelfAttention(num_heads, embed_dim) + mhsa = MultiHeadSelfAttention(num_heads, embed_dim, dropout_p=dropout_p) x = torch.randn(batch_size * 2, embed_dim) shapes = [list(x.shape)] @@ -54,10 +57,11 @@ def test_multi_head_self_attention_forward(batch_size, num_heads, embed_dim_mult batch_size=st.integers(min_value=1, max_value=64), num_heads=st.integers(min_value=1, max_value=20), embed_dim_multiplier=st.integers(min_value=1, max_value=10), + dropout_p=st.floats(min_value=0.0, max_value=1.0), ) -def test_multi_head_self_attention_backward(batch_size, num_heads, embed_dim_multiplier): +def test_multi_head_self_attention_backward(batch_size, num_heads, embed_dim_multiplier, dropout_p): embed_dim = num_heads * embed_dim_multiplier - mhsa = MultiHeadSelfAttention(num_heads, embed_dim) + mhsa = MultiHeadSelfAttention(num_heads, embed_dim, dropout_p=dropout_p) x = torch.randn(batch_size * 2, embed_dim, requires_grad=True) shapes = [list(x.shape)] From 4f695c12abf1b20f46fa1efa5a317cc2f694cf42 Mon Sep 17 00:00:00 2001 From: theissenhelen Date: Thu, 1 Aug 2024 08:41:22 +0000 Subject: [PATCH 03/16] doc: update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 661ed93..610d82f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ Keep it human-readable, your future self will thank you! 
## [Unreleased]

### Added
+- configurability of the dropout probability in the MultiHeadSelfAttention module

### Changed

From 17d2f7339fabc1ef12f39ee0b6b1748ad5b9b2de Mon Sep 17 00:00:00 2001
From: Helen Theissen
Date: Thu, 8 Aug 2024 13:18:50 +0100
Subject: [PATCH 04/16] Feature/integrate reusable workflows (#16)

* ci: add public pr label

* ci: add readthedocs update check

* ci: add downstream ci

* ci: add ci-config

* chore(deps): remove unused dependency

* docs: update changelog

* ci: switch to main
---
 .github/ci-config.yml                       |  3 ++
 .github/workflows/ci.yml                    | 40 +++++++++++++++++++++
 .github/workflows/label-public-pr.yml       | 10 ++++++
 .github/workflows/readthedocs-pr-update.yml | 22 ++++++++++++
 CHANGELOG.md                                |  2 ++
 pyproject.toml                              |  1 -
 6 files changed, 77 insertions(+), 1 deletion(-)
 create mode 100644 .github/ci-config.yml
 create mode 100644 .github/workflows/ci.yml
 create mode 100644 .github/workflows/label-public-pr.yml
 create mode 100644 .github/workflows/readthedocs-pr-update.yml

diff --git a/.github/ci-config.yml b/.github/ci-config.yml
new file mode 100644
index 0000000..f712f26
--- /dev/null
+++ b/.github/ci-config.yml
@@ -0,0 +1,3 @@
+dependency_branch: develop
+parallelism_factor: 8
+self_build: false # Only for python packages
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000..ab853f5
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,40 @@
+name: ci
+
+on:
+  # Trigger the workflow on push to main or develop, except tag creation
+  push:
+    branches:
+      - 'main'
+      - 'develop'
+    tags-ignore:
+      - '**'
+
+  # Trigger the workflow on pull request
+  pull_request: ~
+
+  # Trigger the workflow manually
+  workflow_dispatch: ~
+
+  # Trigger after public PR approved for CI
+  pull_request_target:
+    types: [labeled]
+
+jobs:
+  # Run CI including downstream packages on self-hosted runners
+  downstream-ci:
+    name: downstream-ci
+    if: ${{ !github.event.pull_request.head.repo.fork && github.event.action != 'labeled' || github.event.label.name == 'approved-for-ci' }}
+    uses: ecmwf-actions/downstream-ci/.github/workflows/downstream-ci.yml@main
+    with:
+      anemoi-models: ecmwf/anemoi-models@${{ github.event.pull_request.head.sha || github.sha }}
+      codecov_upload: true
+    secrets: inherit
+
+  # Build downstream packages on HPC
+  downstream-ci-hpc:
+    name: downstream-ci-hpc
+    if: ${{ !github.event.pull_request.head.repo.fork && github.event.action != 'labeled' || github.event.label.name == 'approved-for-ci' }}
+    uses: ecmwf-actions/downstream-ci/.github/workflows/downstream-ci.yml@main
+    with:
+      anemoi-models: ecmwf/anemoi-models@${{ github.event.pull_request.head.sha || github.sha }}
+    secrets: inherit
diff --git a/.github/workflows/label-public-pr.yml b/.github/workflows/label-public-pr.yml
new file mode 100644
index 0000000..59b2bfa
--- /dev/null
+++ b/.github/workflows/label-public-pr.yml
@@ -0,0 +1,10 @@
+# Manage labels of pull requests that originate from forks
+name: label-public-pr
+
+on:
+  pull_request_target:
+    types: [opened, synchronize]
+
+jobs:
+  label:
+    uses: ecmwf-actions/reusable-workflows/.github/workflows/label-pr.yml@v2
diff --git a/.github/workflows/readthedocs-pr-update.yml b/.github/workflows/readthedocs-pr-update.yml
new file mode 100644
index 0000000..264c405
--- /dev/null
+++ b/.github/workflows/readthedocs-pr-update.yml
@@ -0,0 +1,22 @@
+name: Read the Docs PR Preview
+on:
+  pull_request_target:
+    types:
+      - opened
+      - synchronize
+      - reopened
+    # Execute this action only on PRs that touch
+    # documentation files.
+    paths:
+      - "docs/**"
+
+permissions:
+  pull-requests: write
+
+jobs:
+  documentation-links:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: readthedocs/actions/preview@v1
+        with:
+          project-slug: "anemoi-models"
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 610d82f..5932572 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,8 @@ Keep it human-readable, your future self will thank you!
 ### Added
 - configurability of the dropout probability in the MultiHeadSelfAttention module
+- added downstream-ci pipeline
+- readthedocs PR update check action

 ### Changed
diff --git a/pyproject.toml b/pyproject.toml
index 66e617b..05a99c4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -51,7 +51,6 @@ dynamic = [
   "version",
 ]
 dependencies = [
-  "anemoi-datasets>=0.2.1",
   "anemoi-utils>=0.1.9",
   "einops>=0.6.1",
   "hydra-core>=1.3",

From a0f30bab6d58b18a697f44853234ab1b7e7ade3b Mon Sep 17 00:00:00 2001
From: Gert Mertes
Date: Fri, 9 Aug 2024 08:33:22 +0000
Subject: [PATCH 05/16] chore: changelog 0.2.1

---
 CHANGELOG.md | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5932572..a531801 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,13 +12,22 @@ Keep it human-readable, your future self will thank you!
 ### Added

 - configurability of the dropout probability in the MultiHeadSelfAttention module
-- added downstream-ci pipeline
-- readthedocs PR update check action

 ### Changed

 ### Removed

+## 0.2.1
+
+### Added
+
+- downstream-ci pipeline
+- readthedocs PR update check action
+
+### Removed
+
+- anemoi-datasets dependency
+
 ## 0.2.0

 ### Added

From 68a7de0b1c55b23096ec67a9719620c5297f8c32 Mon Sep 17 00:00:00 2001
From: Mario Santa Cruz <48736305+JPXKQX@users.noreply.github.com>
Date: Fri, 9 Aug 2024 11:21:58 +0200
Subject: [PATCH 06/16] Update error messages from invalid sub_graph in model
 instantiation (#20)

---
 CHANGELOG.md                       | 2 ++
 src/anemoi/models/layers/mapper.py | 4 ++--
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a531801..03e5fae 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,6 +15,8 @@ Keep it human-readable, your future self will thank you!

 ### Changed

+- New error messages for wrong graphs.
+
 ### Removed

 ## 0.2.1
diff --git a/src/anemoi/models/layers/mapper.py b/src/anemoi/models/layers/mapper.py
index 04efdf0..0967041 100644
--- a/src/anemoi/models/layers/mapper.py
+++ b/src/anemoi/models/layers/mapper.py
@@ -134,8 +134,8 @@ def _register_edges(
         trainable_size : int
             Trainable tensor size
         """
-        if edge_attributes is None:
-            raise ValueError("Edge attributes must be provided")
+        assert sub_graph, f"{self.__class__.__name__} needs a valid sub_graph to register edges."
+        assert edge_attributes is not None, "Edge attributes must be provided"

         edge_attr_tensor = torch.cat([sub_graph[attr] for attr in edge_attributes], axis=1)

From 478e875c60557d203c722038f46b214cec8cd9fe Mon Sep 17 00:00:00 2001
From: Jesper Dramsch
Date: Fri, 9 Aug 2024 15:17:08 +0200
Subject: [PATCH 07/16] ci: inherit pypi publish flow (#17)

* ci: inherit pypi publish flow

Co-authored-by: Helen Theissen

* docs: add to changelog

* fix: typo in reusable workflow

* fix: another typo

* chore: bump actions/setup-python to v5

* ci: run downstream-ci for changes in src and tests

* docs: update changelog

---------

Co-authored-by: Helen Theissen
---
 .github/workflows/ci.yml             |  3 +++
 .github/workflows/python-publish.yml | 27 +++------------------------
 CHANGELOG.md                         |  2 ++
 3 files changed, 8 insertions(+), 24 deletions(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index ab853f5..5867ee0 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -8,6 +8,9 @@ on:
       - 'develop'
     tags-ignore:
       - '**'
+    paths:
+      - "src/**"
+      - "tests/**"

   # Trigger the workflow on pull request
   pull_request: ~
diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml
index 666f65d..de01bf6 100644
--- a/.github/workflows/python-publish.yml
+++ b/.github/workflows/python-publish.yml
@@ -35,7 +35,7 @@ jobs:
     steps:
       - uses: actions/checkout@v4

-      - uses: actions/setup-python@v2
+      - uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}

@@ -48,27 +48,6 @@ jobs:
         run: pytest

   deploy:
-
-    if: ${{ github.event_name == 'release' }}
-
-    runs-on: ubuntu-latest
     needs: [checks, quality]
-
-    steps:
-    - uses: actions/checkout@v4
-
-    - name: Set up Python
-      uses: actions/setup-python@v2
-      with:
-        python-version: 3.x
-
-    - name: Install dependencies
-      run: |
-        python -m pip install --upgrade pip
-        pip install build wheel twine
-    - name: Build and publish
-      env:
-        TWINE_USERNAME: __token__
-        TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
-      run: |
-        python -m build
-        twine upload dist/*
+    uses: ecmwf-actions/reusable-workflows/.github/workflows/cd-pypi.yml@v2
+    secrets: inherit
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 03e5fae..e39f5aa 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,6 +14,8 @@ Keep it human-readable, your future self will thank you!
 - configurability of the dropout probability in the MultiHeadSelfAttention module

 ### Changed
+ - Update CI to inherit from common infrastructure reusable workflows
+ - run downstream-ci only when src and tests folders have changed

 - New error messages for wrong graphs.

From d82cbf526d0edc16399048f3bb3f63bcb5aee77e Mon Sep 17 00:00:00 2001
From: Jesper Dramsch
Date: Fri, 9 Aug 2024 15:21:37 +0200
Subject: [PATCH 08/16] Update CHANGELOG.md to KeepChangelog format

---
 CHANGELOG.md | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e39f5aa..04e8df4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,14 +14,14 @@ Keep it human-readable, your future self will thank you!
 - configurability of the dropout probability in the MultiHeadSelfAttention module

 ### Changed
+
 - Update CI to inherit from common infrastructure reusable workflows
 - run downstream-ci only when src and tests folders have changed
-
-- New error messages for wrong graphs.
+ - New error messages for wrong graphs.

 ### Removed

-## 0.2.1
+## [0.2.1] - Dependency update

 ### Added

 - downstream-ci pipeline
 - readthedocs PR update check action

 ### Removed

 - anemoi-datasets dependency

-## 0.2.0
+## [0.2.0] - Support Heterodata

 ### Added

@@ -42,18 +42,15 @@ Keep it human-readable, your future self will thank you!

 - Updated to support new PyTorch Geometric HeteroData structure (defined by
   `anemoi-graphs` package).

-### Removed
-
-## 0.1.0 Initial Release
+## [0.1.0] - Initial Release

 ### Added

 - Documentation
 - Initial code release with models, layers, distributed, preprocessing, and data_indices
 - Added Changelog
-### Changed
-
-### Removed
-
-## Git Diffs:
+
+[unreleased]: https://github.com/ecmwf/anemoi-models/compare/0.2.1...HEAD
+[0.2.1]: https://github.com/ecmwf/anemoi-models/compare/0.2.0...0.2.1
+[0.2.0]: https://github.com/ecmwf/anemoi-models/compare/0.1.0...0.2.0
 [0.1.0]: https://github.com/ecmwf/anemoi-models/releases/tag/0.1.0

From 495a892ed5ef955211a4aa4bbe95f4331d65ec1f Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 27 Aug 2024 17:03:06 +0200
Subject: [PATCH 09/16] [pre-commit.ci] pre-commit autoupdate (#25)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

updates:
- [github.com/psf/black-pre-commit-mirror: 24.4.2 → 24.8.0](https://github.com/psf/black-pre-commit-mirror/compare/24.4.2...24.8.0)
- [github.com/astral-sh/ruff-pre-commit: v0.4.6 → v0.6.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.6...v0.6.2)
- [github.com/tox-dev/pyproject-fmt: 2.1.3 → 2.2.1](https://github.com/tox-dev/pyproject-fmt/compare/2.1.3...2.2.1)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .pre-commit-config.yaml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f4b6367..9dc25d3 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -21,7 +21,7 @@ repos:
       - id: check-added-large-files # Check for large files added to git
       - id: check-merge-conflict # Check for files that contain merge conflict
   - repo: https://github.com/psf/black-pre-commit-mirror
-    rev: 24.4.2
+    rev: 24.8.0
     hooks:
       - id: black
         args: [--line-length=120]
@@ -34,7 +34,7 @@ repos:
         - --force-single-line-imports
         - --profile black
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.4.6
+    rev: v0.6.2
     hooks:
       - id: ruff
         # Next line is for documentation code snippets
@@ -65,6 +65,6 @@ repos:
       - id: optional-dependencies-all
         args: ["--inplace", "--exclude-keys=dev,docs,tests", "--group=dev=all,docs,tests"]
   - repo: https://github.com/tox-dev/pyproject-fmt
-    rev: "2.1.3"
+    rev: "2.2.1"
     hooks:
       - id: pyproject-fmt

From 88680f58f574939fbeef93bd7b96db3743679ff4 Mon Sep 17 00:00:00 2001
From: Helen Theissen
Date: Tue, 27 Aug 2024 16:03:30 +0100
Subject: [PATCH 10/16] Ci/changelog-release-updater (#26)

* ci: add changelog release updater

* docs: update changelog
---
 .github/workflows/changelog-release-update.yml | 34 +++++++++++++++++++
 CHANGELOG.md                                   |  1 +
 2 files changed, 35 insertions(+)
 create mode 100644 .github/workflows/changelog-release-update.yml

diff --git a/.github/workflows/changelog-release-update.yml b/.github/workflows/changelog-release-update.yml
new file mode 100644
index 0000000..79b85ad
--- /dev/null
+++ b/.github/workflows/changelog-release-update.yml
@@ -0,0 +1,34 @@
+# .github/workflows/update-changelog.yaml
+name: "Update Changelog"
+
+on:
+  release:
+    types: [released]
+
+permissions:
+  pull-requests: write
+  contents: write
+
+jobs:
+  update:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          ref: ${{ github.event.release.target_commitish }}
+
+      - name: Update Changelog
+        uses: stefanzweifel/changelog-updater-action@v1
+        with:
+          latest-version: ${{ github.event.release.tag_name }}
+          heading-text: ${{ github.event.release.name }}
+
+      - name: Create Pull Request
+        uses: peter-evans/create-pull-request@v6
+        with:
+          branch: docs/changelog-update-${{ github.event.release.tag_name }}
+          title: '[Changelog] Update to ${{ github.event.release.tag_name }}'
+          add-paths: |
+            CHANGELOG.md
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 04e8df4..52c3366 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,7 @@ Keep it human-readable, your future self will thank you!
 ### Added

 - configurability of the dropout probability in the MultiHeadSelfAttention module
+ - CI workflow to update the changelog on release

 ### Changed

From bc2c0d6c039f43a9df2bf01748be7d7acf7bfdc0 Mon Sep 17 00:00:00 2001
From: Helen Theissen
Date: Thu, 8 Aug 2024 13:18:50 +0100
Subject: [PATCH 11/16] Feature/integrate reusable workflows (#16)

* ci: add public pr label

* ci: add readthedocs update check

* ci: add downstream ci

* ci: add ci-config

* chore(deps): remove unused dependency

* docs: update changelog

* ci: switch to main
---
 CHANGELOG.md | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 52c3366..1ad4f61 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,7 +12,9 @@ Keep it human-readable, your future self will thank you!
 ### Added

 - configurability of the dropout probability in the MultiHeadSelfAttention module
- - CI workflow to update the changelog on release
+- CI workflow to update the changelog on release
+- added downstream-ci pipeline
+- readthedocs PR update check action

 ### Changed

From 0d048ad29e524ef994eceb4656701734dfbfdc9a Mon Sep 17 00:00:00 2001
From: Gert Mertes
Date: Fri, 9 Aug 2024 08:33:22 +0000
Subject: [PATCH 12/16] chore: changelog 0.2.1

---
 CHANGELOG.md | 16 +---------------
 1 file changed, 1 insertion(+), 15 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1ad4f61..0d5c487 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,26 +11,12 @@ Keep it human-readable, your future self will thank you!
 ## [Unreleased]

 ### Added
+
 - configurability of the dropout probability in the MultiHeadSelfAttention module
 - CI workflow to update the changelog on release
-- added downstream-ci pipeline
-- readthedocs PR update check action

 ### Changed

 ### Removed

 - anemoi-datasets dependency

From 1be9ddeeb3e6ffa862ef0b40547b97da787001c6 Mon Sep 17 00:00:00 2001
From: Mario Santa Cruz <48736305+JPXKQX@users.noreply.github.com>
Date: Fri, 9 Aug 2024 11:21:58 +0200
Subject: [PATCH 13/16] Update error messages from invalid sub_graph in model
 instantiation (#20)

---
 CHANGELOG.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0d5c487..bf32746 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -17,6 +17,8 @@ Keep it human-readable, your future self will thank you!

 ### Changed

+- New error messages for wrong graphs.
+
 ### Removed

 - anemoi-datasets dependency

From cbe8c4978ccb097aea924f8329414baa6fa988f2 Mon Sep 17 00:00:00 2001
From: Jesper Dramsch
Date: Fri, 9 Aug 2024 15:17:08 +0200
Subject: [PATCH 14/16] ci: inherit pypi publish flow (#17)

* ci: inherit pypi publish flow

Co-authored-by: Helen Theissen

* docs: add to changelog

* fix: typo in reusable workflow

* fix: another typo

* chore: bump actions/setup-python to v5

* ci: run downstream-ci for changes in src and tests

* docs: update changelog

---------

Co-authored-by: Helen Theissen
---
 CHANGELOG.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index bf32746..be834a2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,8 @@ Keep it human-readable, your future self will thank you!
 - CI workflow to update the changelog on release

 ### Changed
+ - Update CI to inherit from common infrastructure reusable workflows
+ - run downstream-ci only when src and tests folders have changed

 - New error messages for wrong graphs.

From c7f6c3ae0a51af224f878c166fc9dee1d9bcd9e1 Mon Sep 17 00:00:00 2001
From: Jesper Dramsch
Date: Fri, 9 Aug 2024 15:21:37 +0200
Subject: [PATCH 15/16] Update CHANGELOG.md to KeepChangelog format

---
 CHANGELOG.md | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index be834a2..9adab92 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,10 +16,19 @@ Keep it human-readable, your future self will thank you!
 - CI workflow to update the changelog on release

 ### Changed
+
 - Update CI to inherit from common infrastructure reusable workflows
 - run downstream-ci only when src and tests folders have changed
+ - New error messages for wrong graphs.
+
+### Removed
+
+## [0.2.1] - Dependency update
+
+### Added

-- New error messages for wrong graphs.
+- downstream-ci pipeline
+- readthedocs PR update check action

 ### Removed

From 4a96f2d87e6e6d3930210d2a8a4945d771367f41 Mon Sep 17 00:00:00 2001
From: Helen Theissen
Date: Tue, 27 Aug 2024 16:03:30 +0100
Subject: [PATCH 16/16] Ci/changelog-release-updater (#26)

* ci: add changelog release updater

* docs: update changelog
---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9adab92..b027a40 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,7 @@ Keep it human-readable, your future self will thank you!
 ## [Unreleased]

 ### Added
+
 - CI workflow to update the changelog on release
 - configurability of the dropout probability in the MultiHeadSelfAttention module
 - CI workflow to update the changelog on release
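
For reference, a minimal usage sketch of the `dropout_p` option this series introduces. The class, module path, and constructor arguments come from the diffs in patch 01; the concrete sizes and the shape of the forward call are assumptions based on the test fixtures in patch 02, not a verbatim excerpt:

    import torch

    from anemoi.models.layers.attention import MultiHeadSelfAttention

    # Illustrative sizes (assumption): embed_dim must be divisible by num_heads.
    mhsa = MultiHeadSelfAttention(
        num_heads=16,
        embed_dim=1024,
        dropout_p=0.1,  # attention dropout probability, new in this series
    )

    x = torch.randn(2, 1024)  # (batch * grid, embed_dim), as in the tests
    shapes = [list(x.shape)]

    mhsa.train()   # dropout is active: forward uses self.dropout_p while training ...
    out = mhsa(x, shapes, batch_size=1)

    mhsa.eval()    # ... and the probability is forced to 0.0 outside training (patch 01)
    out = mhsa(x, shapes, batch_size=1)

Because patch 01 gates the probability with `self.training`, calling `model.eval()` disables attention dropout without any configuration change; the `dropout_p` threaded through `TransformerProcessorChunk` and `TransformerProcessor` (default 0.1) therefore only affects training runs.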