Commit 6c12dda
fix: annotation error
theissenhelen committed Oct 3, 2024
1 parent c841324 commit 6c12dda
Showing 4 changed files with 9 additions and 9 deletions.
4 changes: 2 additions & 2 deletions src/anemoi/models/layers/attention.py
@@ -37,8 +37,8 @@ def __init__(
         window_size: Optional[int] = None,
         dropout_p: float = 0.0,
         use_flash_attention: bool = False,
-        softcap: float | None = None,
-        use_alibi_slopes: bool | None = None,
+        softcap: float = None,
+        use_alibi_slopes: bool = None,
     ):
         """Initialize MultiHeadSelfAttention.
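The same two-line annotation change is repeated in block.py, chunk.py and processor.py below. A likely reading of the commit title (an assumption; the diff itself does not state the cause) is that PEP 604 unions such as float | None are evaluated when the function is defined, so on Python 3.9 they raise a TypeError at import time unless postponed annotation evaluation is enabled. A minimal, self-contained sketch of that failure mode and two spellings that work on 3.9, neither taken from this repository:

# Minimal sketch, not code from this repository: why a PEP 604 union in a
# signature can fail before Python 3.10. Annotations are evaluated at function
# definition time, so a parameter annotated `float | None` raises on 3.9:
#
#   TypeError: unsupported operand type(s) for |: 'type' and 'NoneType'
#
# unless `from __future__ import annotations` postpones evaluation.
from typing import Optional


class Works:
    # Both spellings below run on Python 3.9: a plain annotation with a None
    # default (the form this commit switches to) or an explicit Optional[...].
    def __init__(self, softcap: float = None, window_size: Optional[int] = None):
        self.softcap = softcap
        self.window_size = window_size


# The pre-commit form, shown commented out so this sketch still imports on 3.9:
#
# class Breaks:
#     def __init__(self, softcap: float | None = None):
#         ...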
4 changes: 2 additions & 2 deletions src/anemoi/models/layers/block.py
@@ -64,8 +64,8 @@ def __init__(
         window_size: int,
         dropout_p: float = 0.0,
         use_flash_attention: bool = False,
-        softcap: float | None = None,
-        use_alibi_slopes: bool | None = None,
+        softcap: float = None,
+        use_alibi_slopes: bool = None,
     ):
         super().__init__()

4 changes: 2 additions & 2 deletions src/anemoi/models/layers/chunk.py
@@ -75,8 +75,8 @@ def __init__(
         activation: str = "GELU",
         dropout_p: float = 0.0,
         use_flash_attention: bool = False,
-        softcap: float | None = None,
-        use_alibi_slopes: bool | None = None,
+        softcap: float = None,
+        use_alibi_slopes: bool = None,
     ) -> None:
         """Initialize TransformerProcessor.
6 changes: 3 additions & 3 deletions src/anemoi/models/layers/processor.py
@@ -97,8 +97,8 @@ def __init__(
         mlp_hidden_ratio: int = 4,
         dropout_p: float = 0.1,
         use_flash_attention: bool = False,
-        softcap: float | None = 0.0,
-        use_alibi_slopes: Tensor | None = None,
+        softcap: float = 0.0,
+        use_alibi_slopes: Tensor = None,
         **kwargs,
     ) -> None:
         """Initialize TransformerProcessor.
@@ -120,7 +120,7 @@ def __init__(
         dropout_p: float, optional
             Dropout probability used for multi-head self attention, default 0.0
         softcap : float, optional
-            Anything > 0 activates softcapping flash attention, by default 0.0
+            Anything > 0 activates softcapping flash attention, by default None
         use_alibi_slopes : bool, optional
             Use aLiBI option, only used for flash attention, by default None
         """
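For context on what the two documented options control, below is a hedged sketch of how softcap and use_alibi_slopes are commonly forwarded to flash attention. It is not code from this commit or this repository; it assumes flash-attn >= 2.6 (which added the softcap argument to flash_attn_func), and the helper name attention_with_options and the power-of-two slope formula are illustrative assumptions.

# Hedged sketch, not taken from anemoi-models: one common way to pass the two
# options documented above through to flash-attn. Assumes flash-attn >= 2.6
# (softcap argument) and q, k, v in (batch, seqlen, num_heads, head_dim)
# layout with fp16/bf16 dtype, as flash_attn_func expects.
import torch
from flash_attn import flash_attn_func


def attention_with_options(
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    softcap: float = None,
    use_alibi_slopes: bool = None,
    dropout_p: float = 0.0,
) -> torch.Tensor:
    num_heads = q.shape[2]
    # ALiBi slopes: one float32 slope per head; the geometric sequence below is
    # the usual choice when num_heads is a power of two (illustrative only).
    alibi_slopes = None
    if use_alibi_slopes:
        exponents = torch.arange(1, num_heads + 1, device=q.device, dtype=torch.float32)
        alibi_slopes = 2.0 ** (-8.0 * exponents / num_heads)
    return flash_attn_func(
        q,
        k,
        v,
        dropout_p=dropout_p,
        # flash-attn treats softcap=0.0 as "no softcapping", which matches the
        # docstring's "anything > 0 activates softcapping".
        softcap=softcap if softcap is not None else 0.0,
        alibi_slopes=alibi_slopes,
    )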
