From ba5901d356bfa1c4f8974929bb35cb4c6937e9f1 Mon Sep 17 00:00:00 2001
From: Trevor James Smith <10819524+Zeitsperre@users.noreply.github.com>
Date: Wed, 18 Sep 2024 15:25:11 -0400
Subject: [PATCH] Adopt SPEC 0 schedule, drop Python 3.9 (#1915)
### What kind of change does this PR introduce?
* Extends support to Python 3.13 and drops support for Python 3.9
* Raises the minimum required versions of a handful of dependencies,
following Scientific Python's SPEC 0 recommendation:
* `ipython` to v8.5.0+
* `matplotlib` to v3.6.0+
* `numpy` to v1.23+
* `scikit-learn` to v1.1.0+
* Updates the codebase to use Python 3.10+ conventions
* Raises the minimum version of `nbsphinx` to v0.9.5+
### Does this PR introduce a breaking change?
Yes. Python 3.9 support has been dropped.
### Other information:
https://scientific-python.org/specs/spec-0000/ (SPEC 0: minimum supported dependencies)
https://peps.python.org/pep-0619/ (Python 3.10 release schedule)
Notable changes:
* https://peps.python.org/pep-0604/ (`X | Y` union syntax)
* https://peps.python.org/pep-0618/ (`strict=` keyword for `zip`)
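
For reviewers unfamiliar with those two conventions, here is a minimal sketch of what the codebase-wide changes look like (the names are illustrative, not taken from `xclim`):

```python
from __future__ import annotations

import xarray as xr


def variable_name(obj: xr.DataArray | str) -> str | None:
    """PEP 604: `X | Y` replaces `typing.Union[X, Y]`, and `X | None`
    replaces `typing.Optional[X]`. Since Python 3.10, `isinstance`
    also accepts these unions directly."""
    if isinstance(obj, xr.DataArray):
        return None if obj.name is None else str(obj.name)
    return obj


# PEP 618: `zip(..., strict=True)` raises ValueError when the iterables
# have different lengths, instead of silently truncating to the shortest.
for dim, size in zip(("time", "lat", "lon"), (365, 180, 360), strict=True):
    print(dim, size)
```

Note that this patch passes `strict=False` at every call site, which keeps the old truncating behaviour of `zip` while making the choice explicit.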
---
.github/workflows/main.yml | 27 +++----
CHANGELOG.rst | 6 ++
CI/requirements_ci.in | 5 +-
CI/requirements_ci.txt | 75 ++-----------------
CONTRIBUTING.rst | 22 ++++--
docs/notebooks/analogs.ipynb | 4 +-
docs/notebooks/benchmarks/sdba_quantile.ipynb | 4 +-
docs/notebooks/ensembles.ipynb | 6 +-
docs/notebooks/partitioning.ipynb | 2 +-
docs/notebooks/sdba.ipynb | 4 +-
environment.yml | 18 ++---
pyproject.toml | 26 +++----
tests/test_cffwis.py | 6 +-
tests/test_cli.py | 4 +-
tests/test_ensembles.py | 2 +-
tests/test_flags.py | 6 +-
tests/test_indicators.py | 3 +-
tests/test_locales.py | 2 +-
tox.ini | 27 +++----
xclim/core/bootstrapping.py | 3 +-
xclim/core/calendar.py | 14 ++--
xclim/core/formatting.py | 10 +--
xclim/core/indicator.py | 28 +++----
xclim/core/locales.py | 2 +-
xclim/core/options.py | 2 +-
xclim/core/units.py | 11 +--
xclim/core/utils.py | 7 +-
xclim/ensembles/_base.py | 2 +-
xclim/ensembles/_robustness.py | 2 +-
xclim/indices/_agro.py | 2 +-
xclim/indices/_anuclim.py | 3 +-
xclim/indices/_multivariate.py | 3 +-
xclim/indices/fire/_cffwis.py | 2 +-
xclim/indices/generic.py | 5 +-
xclim/indices/run_length.py | 2 +-
xclim/sdba/_adjustment.py | 11 ++-
xclim/sdba/base.py | 7 +-
xclim/sdba/loess.py | 2 +-
xclim/sdba/processing.py | 2 +-
xclim/sdba/properties.py | 4 +-
xclim/sdba/utils.py | 4 +-
xclim/testing/sdba_utils.py | 2 +-
xclim/testing/utils.py | 6 +-
43 files changed, 174 insertions(+), 211 deletions(-)
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 35f7a5200..30d1ee4ef 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -32,15 +32,12 @@ permissions:
jobs:
lint:
- name: Lint (Python${{ matrix.python-version }})
+ name: Lint
runs-on: ubuntu-latest
if: |
(github.event.action != 'labeled') ||
(github.event.review.state == 'approved') ||
(github.event_name == 'push')
- strategy:
- matrix:
- python-version: [ "3.9" ]
steps:
- name: Harden Runner
uses: step-security/harden-runner@91182cccc01eb5e619899d80e4e971d6181294a7 # v2.10.1
@@ -53,10 +50,10 @@ jobs:
pypi.org:443
- name: Checkout Repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- - name: Set up Python${{ matrix.python-version }}
+ - name: Set up Python3
uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
with:
- python-version: ${{ matrix.python-version }}
+ python-version: "3.x"
cache: 'pip'
- name: Install CI libraries
run: |
@@ -82,7 +79,7 @@ jobs:
strategy:
matrix:
os: [ 'ubuntu-latest' ]
- python-version: [ "3.9" ]
+ python-version: [ "3.10" ]
testdata-cache: [ '~/.cache/xclim-testdata' ]
steps:
- name: Harden Runner
@@ -153,20 +150,20 @@ jobs:
- os: 'windows-latest'
testdata-cache: 'C:\Users\runneradmin\AppData\Local\xclim-testdata\xclim-testdata\Cache'
markers: -m 'not slow'
- python-version: "3.9"
- tox-env: py39-coverage-prefetch # Test data prefetch is needed for Windows
+ python-version: "3.10"
+ tox-env: py310-coverage-prefetch # Test data prefetch is needed for Windows
# macOS builds
- os: 'macos-latest'
testdata-cache: '~/Library/Caches/xclim-testdata'
markers: '' # Slow tests
- python-version: "3.10"
- tox-env: py310-coverage-extras
+ python-version: "3.11"
+ tox-env: py311-coverage-extras
# Specialized tests
- os: 'ubuntu-latest'
testdata-cache: '~/.cache/xclim-testdata'
markers: -m 'not requires_internet and not slow'
- python-version: "3.9"
- tox-env: py39-coverage-offline-prefetch
+ python-version: "3.11"
+ tox-env: py311-coverage-sbck-offline-prefetch
- os: 'ubuntu-latest'
testdata-cache: '~/.cache/xclim-testdata'
markers: '' # No markers for notebooks
@@ -198,7 +195,7 @@ jobs:
- name: Checkout Repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Install Eigen3 (SBCK)
- if: ${{ matrix.python-version == '3.11' }}
+ if: ${{ matrix.python-version == '3.11' && matrix.os == 'ubuntu-latest' }}
run: |
sudo apt-get update
sudo apt-get install libeigen3-dev
@@ -247,7 +244,7 @@ jobs:
strategy:
matrix:
os: [ 'ubuntu-latest' ]
- python-version: [ "3.9", "3.12" ]
+ python-version: [ "3.10", "3.12" ]
testdata-cache: [ '~/.cache/xclim-testdata' ]
defaults:
run:
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 9b1c67b40..1fb325df6 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -6,6 +6,11 @@ v0.53.0 (unreleased)
--------------------
Contributors to this version: Adrien Lamarche (:user:`LamAdr`), Trevor James Smith (:user:`Zeitsperre`), Éric Dupuis (:user:`coxipi`), Pascal Bourgault (:user:`aulemahal`).
+Announcements
+^^^^^^^^^^^^^
+* `xclim` has now adopted the `Scientific Python SPEC 0 <https://scientific-python.org/specs/spec-0000/>`_ conventions for its suggested dependency support schedule. (:issue:`1914`, :pull:`1915`).
+* `xclim` has dropped support for Python 3.9 and adopted Python 3.10+ code styling conventions. (:issue:`1914`, :pull:`1915`).
+
New indicators
^^^^^^^^^^^^^^
* New ``heat_spell_frequency``, ``heat_spell_max_length`` and ``heat_spell_total_length`` : spell length statistics on a bivariate condition that uses the average over a window by default. (:pull:`1885`).
@@ -25,6 +30,7 @@ Breaking changes
* `platformdirs` is no longer a direct dependency of `xclim`, but `pooch` is required to use many of the new testing functions (installable via `pip install pooch` or `pip install 'xclim[dev]'`). (:pull:`1889`).
* The following previously-deprecated functions have now been removed from `xclim`: ``xclim.core.calendar.convert_calendar``, ``xclim.core.calendar.date_range``, ``xclim.core.calendar.date_range_like``, ``xclim.core.calendar.interp_calendar``, ``xclim.core.calendar.days_in_year``, ``xclim.core.calendar.datetime_to_decimal_year``. For guidance on how to migrate to alternatives, see the `version 0.50.0 Breaking changes <#v0-50-0-2024-06-17>`_. (:issue:`1010`, :pull:`1845`).
* `transform` argument of `OTC/dOTC` classes (and child functions) is changed to `normalization`, and `numIterMax` is changed to `num_iter_max` in `utils.optimal_transport` (:pull:`1896`).
+* `xclim` now requires `numpy >=1.23.0` and `scikit-learn >=1.1.0`, as well as (optionally) `ipython >=8.5.0`, `nbsphinx >=0.9.5`, and `matplotlib >=3.6.0`. (:issue:`1914`, :pull:`1915`).
Internal changes
^^^^^^^^^^^^^^^^
diff --git a/CI/requirements_ci.in b/CI/requirements_ci.in
index 6db098e36..34b67cbe4 100644
--- a/CI/requirements_ci.in
+++ b/CI/requirements_ci.in
@@ -1,8 +1,7 @@
bump-my-version==0.26.1
-coveralls==4.0.1
deptry==0.20.0
flit==3.9.0
-pip==24.2
+pip==24.2.0
pylint==3.2.7
tox==4.19.0
-tox-gh==1.3.3
+tox-gh==1.4.1
diff --git a/CI/requirements_ci.txt b/CI/requirements_ci.txt
index 8faaf0b2c..a94e6ea49 100644
--- a/CI/requirements_ci.txt
+++ b/CI/requirements_ci.txt
@@ -1,5 +1,5 @@
#
-# This file is autogenerated by pip-compile with Python 3.9
+# This file is autogenerated by pip-compile with Python 3.10
# by the following command:
#
# pip-compile --generate-hashes --output-file=CI/requirements_ci.txt CI/requirements_ci.in
@@ -135,64 +135,6 @@ colorama==0.4.6 \
--hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \
--hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6
# via tox
-coverage[toml]==7.6.0 \
- --hash=sha256:0086cd4fc71b7d485ac93ca4239c8f75732c2ae3ba83f6be1c9be59d9e2c6382 \
- --hash=sha256:01c322ef2bbe15057bc4bf132b525b7e3f7206f071799eb8aa6ad1940bcf5fb1 \
- --hash=sha256:03cafe82c1b32b770a29fd6de923625ccac3185a54a5e66606da26d105f37dac \
- --hash=sha256:044a0985a4f25b335882b0966625270a8d9db3d3409ddc49a4eb00b0ef5e8cee \
- --hash=sha256:07ed352205574aad067482e53dd606926afebcb5590653121063fbf4e2175166 \
- --hash=sha256:0d1b923fc4a40c5832be4f35a5dab0e5ff89cddf83bb4174499e02ea089daf57 \
- --hash=sha256:0e7b27d04131c46e6894f23a4ae186a6a2207209a05df5b6ad4caee6d54a222c \
- --hash=sha256:1fad32ee9b27350687035cb5fdf9145bc9cf0a094a9577d43e909948ebcfa27b \
- --hash=sha256:289cc803fa1dc901f84701ac10c9ee873619320f2f9aff38794db4a4a0268d51 \
- --hash=sha256:3c59105f8d58ce500f348c5b56163a4113a440dad6daa2294b5052a10db866da \
- --hash=sha256:46c3d091059ad0b9c59d1034de74a7f36dcfa7f6d3bde782c49deb42438f2450 \
- --hash=sha256:482855914928c8175735a2a59c8dc5806cf7d8f032e4820d52e845d1f731dca2 \
- --hash=sha256:49c76cdfa13015c4560702574bad67f0e15ca5a2872c6a125f6327ead2b731dd \
- --hash=sha256:4b03741e70fb811d1a9a1d75355cf391f274ed85847f4b78e35459899f57af4d \
- --hash=sha256:4bea27c4269234e06f621f3fac3925f56ff34bc14521484b8f66a580aacc2e7d \
- --hash=sha256:4d5fae0a22dc86259dee66f2cc6c1d3e490c4a1214d7daa2a93d07491c5c04b6 \
- --hash=sha256:543ef9179bc55edfd895154a51792b01c017c87af0ebaae092720152e19e42ca \
- --hash=sha256:54dece71673b3187c86226c3ca793c5f891f9fc3d8aa183f2e3653da18566169 \
- --hash=sha256:6379688fb4cfa921ae349c76eb1a9ab26b65f32b03d46bb0eed841fd4cb6afb1 \
- --hash=sha256:65fa405b837060db569a61ec368b74688f429b32fa47a8929a7a2f9b47183713 \
- --hash=sha256:6616d1c9bf1e3faea78711ee42a8b972367d82ceae233ec0ac61cc7fec09fa6b \
- --hash=sha256:6fe885135c8a479d3e37a7aae61cbd3a0fb2deccb4dda3c25f92a49189f766d6 \
- --hash=sha256:7221f9ac9dad9492cecab6f676b3eaf9185141539d5c9689d13fd6b0d7de840c \
- --hash=sha256:76d5f82213aa78098b9b964ea89de4617e70e0d43e97900c2778a50856dac605 \
- --hash=sha256:7792f0ab20df8071d669d929c75c97fecfa6bcab82c10ee4adb91c7a54055463 \
- --hash=sha256:831b476d79408ab6ccfadaaf199906c833f02fdb32c9ab907b1d4aa0713cfa3b \
- --hash=sha256:9146579352d7b5f6412735d0f203bbd8d00113a680b66565e205bc605ef81bc6 \
- --hash=sha256:9cc44bf0315268e253bf563f3560e6c004efe38f76db03a1558274a6e04bf5d5 \
- --hash=sha256:a73d18625f6a8a1cbb11eadc1d03929f9510f4131879288e3f7922097a429f63 \
- --hash=sha256:a8659fd33ee9e6ca03950cfdcdf271d645cf681609153f218826dd9805ab585c \
- --hash=sha256:a94925102c89247530ae1dab7dc02c690942566f22e189cbd53579b0693c0783 \
- --hash=sha256:ad4567d6c334c46046d1c4c20024de2a1c3abc626817ae21ae3da600f5779b44 \
- --hash=sha256:b2e16f4cd2bc4d88ba30ca2d3bbf2f21f00f382cf4e1ce3b1ddc96c634bc48ca \
- --hash=sha256:bbdf9a72403110a3bdae77948b8011f644571311c2fb35ee15f0f10a8fc082e8 \
- --hash=sha256:beb08e8508e53a568811016e59f3234d29c2583f6b6e28572f0954a6b4f7e03d \
- --hash=sha256:c4cbe651f3904e28f3a55d6f371203049034b4ddbce65a54527a3f189ca3b390 \
- --hash=sha256:c7b525ab52ce18c57ae232ba6f7010297a87ced82a2383b1afd238849c1ff933 \
- --hash=sha256:ca5d79cfdae420a1d52bf177de4bc2289c321d6c961ae321503b2ca59c17ae67 \
- --hash=sha256:cdab02a0a941af190df8782aafc591ef3ad08824f97850b015c8c6a8b3877b0b \
- --hash=sha256:d17c6a415d68cfe1091d3296ba5749d3d8696e42c37fca5d4860c5bf7b729f03 \
- --hash=sha256:d39bd10f0ae453554798b125d2f39884290c480f56e8a02ba7a6ed552005243b \
- --hash=sha256:d4b3cd1ca7cd73d229487fa5caca9e4bc1f0bca96526b922d61053ea751fe791 \
- --hash=sha256:d50a252b23b9b4dfeefc1f663c568a221092cbaded20a05a11665d0dbec9b8fb \
- --hash=sha256:da8549d17489cd52f85a9829d0e1d91059359b3c54a26f28bec2c5d369524807 \
- --hash=sha256:dcd070b5b585b50e6617e8972f3fbbee786afca71b1936ac06257f7e178f00f6 \
- --hash=sha256:ddaaa91bfc4477d2871442bbf30a125e8fe6b05da8a0015507bfbf4718228ab2 \
- --hash=sha256:df423f351b162a702c053d5dddc0fc0ef9a9e27ea3f449781ace5f906b664428 \
- --hash=sha256:dff044f661f59dace805eedb4a7404c573b6ff0cdba4a524141bc63d7be5c7fd \
- --hash=sha256:e7e128f85c0b419907d1f38e616c4f1e9f1d1b37a7949f44df9a73d5da5cd53c \
- --hash=sha256:ed8d1d1821ba5fc88d4a4f45387b65de52382fa3ef1f0115a4f7a20cdfab0e94 \
- --hash=sha256:f2501d60d7497fd55e391f423f965bbe9e650e9ffc3c627d5f0ac516026000b8 \
- --hash=sha256:f7db0b6ae1f96ae41afe626095149ecd1b212b424626175a6633c2999eaad45b
- # via coveralls
-coveralls==4.0.1 \
- --hash=sha256:7a6b1fa9848332c7b2221afb20f3df90272ac0167060f41b5fe90429b30b1809 \
- --hash=sha256:7b2a0a2bcef94f295e3cf28dcc55ca40b71c77d1c2446b538e85f0f7bc21aa69
- # via -r CI/requirements_ci.in
deptry==0.20.0 \
--hash=sha256:012fb106dbea6ca95196cdcd75ac90c516c8f01292f7934f2e802a7cf025a660 \
--hash=sha256:0c90ce64e637d0e902bc97c5a020adecfee9e9f09ee0bf4c61554994139bebdb \
@@ -215,9 +157,6 @@ distlib==0.3.8 \
--hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \
--hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64
# via virtualenv
-docopt==0.6.2 \
- --hash=sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491
- # via coveralls
docutils==0.21.2 \
--hash=sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f \
--hash=sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2
@@ -401,9 +340,7 @@ questionary==2.0.1 \
requests==2.32.3 \
--hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \
--hash=sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6
- # via
- # coveralls
- # flit
+ # via flit
rich==13.7.1 \
--hash=sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222 \
--hash=sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432
@@ -418,7 +355,6 @@ tomli==2.0.1 \
--hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \
--hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f
# via
- # coverage
# deptry
# pylint
# pyproject-api
@@ -439,9 +375,9 @@ tox==4.19.0 \
# via
# -r CI/requirements_ci.in
# tox-gh
-tox-gh==1.3.3 \
- --hash=sha256:7a05f176ab2180554ada4fc74fd25529d8470d8944d99ac77dfa7b0773a20e25 \
- --hash=sha256:99cfcac42706c6b5f4656ca1a4b5fe50b1f1d2c14549c0a542469fb679d29a3f
+tox-gh==1.4.1 \
+ --hash=sha256:005b33d16eef1bd1dae9f7d8b3cef53374af7d475f9c9c33ef098247741fb694 \
+ --hash=sha256:da422beccbdc5ad5994fe8faf6c193f2d794e957628b052ba23e7fcf9e2e340f
# via -r CI/requirements_ci.in
typing-extensions==4.12.2 \
--hash=sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d \
@@ -450,7 +386,6 @@ typing-extensions==4.12.2 \
# astroid
# pydantic
# pydantic-core
- # pylint
# rich-click
urllib3==2.2.2 \
--hash=sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472 \
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index df93ee2f3..0e6a1e73c 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -162,8 +162,8 @@ Ready to contribute? Here's how to set up `xclim` for local development.
Alternatively, one can use ``$ tox`` to run very specific testing configurations, as GitHub Workflows would do when a Pull Request is submitted and new commits are pushed::
- $ tox -e py39-coverage # run tests on Python 3.9, reporting code coverage
- $ tox -e py310-upstream # run tests on Python 3.10, with upstream dependencies
+ $ tox -e py310-coverage # run tests on Python 3.10, reporting code coverage
+ $ tox -e py313-upstream # run tests on Python 3.13, with upstream dependencies
$ tox -e py311-prefetch-offline -- -m "not slow" # run tests on Python 3.11, force download of testing, ensure tests are all offline, exclude "slow" marked tests
$ tox -e py312-lmoments -- -m "not slow" # run tests on Python 3.12, installing lmoments3, excluding "slow" marked tests
$ tox -e notebooks,doctests # run the notebook-based tests, then run the doctests
@@ -227,7 +227,8 @@ Before you submit a pull request, please follow these guidelines:
.. warning::
Try to keep your contributions within the scope of the issue that you are addressing.
- While it might be tempting to fix other aspects of the library as it comes up, it's better to simply to flag the problems in case others are already working on it.
+ While it might be tempting to fix other aspects of the library as it comes up,
+ it's better to simply flag the problems in case others are already working on them.
Consider adding a "**# TODO:**" or "**# FIXME:**" comment if the need arises.
@@ -235,7 +236,9 @@ Before you submit a pull request, please follow these guidelines:
If you are adding a new set of functions, they **must be tested** and **coverage percentage should not significantly decrease.**
-#. If the pull request adds functionality, your functions should include docstring explanations. So long as the docstrings are syntactically correct, sphinx-autodoc will be able to automatically parse the information. Please ensure that the docstrings and documentation adhere to the following standards (badly formed docstrings will fail build tests):
+#. If the pull request adds functionality, your functions should include docstring explanations.
+ So long as the docstrings are syntactically correct, sphinx-autodoc will be able to automatically parse the information.
+ Please ensure that the docstrings and documentation adhere to the following standards (badly formed docstrings will fail build tests):
* `numpydoc`_
* `reStructuredText (ReST)`_
@@ -244,17 +247,24 @@ Before you submit a pull request, please follow these guidelines:
If you aren't accustomed to writing documentation in reStructuredText (`.rst`), we encourage you to spend a few minutes going over the
incredibly well-summarized `reStructuredText Primer`_ from the sphinx-doc maintainer community.
-#. The pull request should work for Python 3.9, 3.10, 3.11, and 3.12 as well as raise test coverage.
+#. The pull request should work for all currently-supported Python versions, as well as maintain or raise test coverage.
Pull requests are also checked for documentation build status and for `PEP8`_ compliance.
The build statuses and build errors for pull requests can be found at: https://github.com/Ouranosinc/xclim/actions
+ .. note::
+ The currently-supported Python versions are loosely based on the Scientific Python Ecosystem's `SPEC 0` schedule.
+ Generally, when `numpy` and `xarray` drop support for a dependency, `xclim` will follow suit in a subsequent release.
+ For more information, see the `SPEC 0 Schedule <https://scientific-python.org/specs/spec-0000/>`_.
+
.. warning::
PEP8, black, pytest (with xdoctest) and pydocstyle (for numpy docstrings) conventions are strongly enforced.
Ensure that your changes pass all tests prior to pushing your final commits to your branch.
Code formatting errors are treated as build errors and will block your pull request from being accepted.
-#. The version changes (CHANGELOG.rst) should briefly describe changes introduced in the Pull request. Changes should be organized by type (ie: `New indicators`, `New features and enhancements`, `Breaking changes`, `Bug fixes`, `Internal changes`) and the GitHub Pull Request, GitHub Issue. Your name and/or GitHub handle should also be listed among the contributors to this version. This can be done as follows::
+#. The version changes (CHANGELOG.rst) should briefly describe changes introduced in the Pull request.
+ Changes should be organized by type (i.e.: `New indicators`, `New features and enhancements`, `Breaking changes`, `Bug fixes`, `Internal changes`) and should reference the relevant GitHub Pull Request and GitHub Issue.
+ Your name and/or GitHub handle should also be listed among the contributors to this version. This can be done as follows::
Contributors to this version: John Jacob Jingleheimer Schmidt (:user:`username`).
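
For context on the note added above: SPEC 0 recommends dropping Python versions 3 years after their initial release, and core package versions 2 years after theirs. A back-of-the-envelope sketch of those windows (the release date is a known fact; the day-based arithmetic is an approximation):

```python
from datetime import date, timedelta

# Approximate SPEC 0 support windows.
PYTHON_WINDOW = timedelta(days=3 * 365)   # 3 years for Python itself
PACKAGE_WINDOW = timedelta(days=2 * 365)  # 2 years for core packages

py39_release = date(2020, 10, 5)  # Python 3.9.0 release date
print("SPEC 0 drop date for Python 3.9:", py39_release + PYTHON_WINDOW)
# -> 2023-10-05: Python 3.9 is already well past the recommended window.
```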
diff --git a/docs/notebooks/analogs.ipynb b/docs/notebooks/analogs.ipynb
index 06603902b..0afc21a40 100644
--- a/docs/notebooks/analogs.ipynb
+++ b/docs/notebooks/analogs.ipynb
@@ -108,7 +108,7 @@
"sim_std = convert_calendar(sim, \"standard\")\n",
"obs_chibou = obs.sel(lat=sim.lat, lon=sim.lon, method=\"nearest\")\n",
"\n",
- "for ax, var in zip(axs, obs_chibou.data_vars.keys()):\n",
+ "for ax, var in zip(axs, obs_chibou.data_vars.keys(), strict=False):\n",
" obs_chibou[var].plot(ax=ax, label=\"Observation\")\n",
" sim_std[var].plot(ax=ax, label=\"Simulation\")\n",
" ax.set_title(obs_chibou[var].long_name)\n",
@@ -233,7 +233,7 @@
"import time\n",
"\n",
"fig, axs = plt.subplots(4, 2, sharex=True, sharey=True, figsize=(10, 10))\n",
- "for metric, ax in zip(analog.metrics.keys(), axs.flatten()):\n",
+ "for metric, ax in zip(analog.metrics.keys(), axs.flatten(), strict=False):\n",
" start = time.perf_counter()\n",
" results = analog.spatial_analogs(sim, obs, method=metric)\n",
" print(f\"Metric {metric} took {time.perf_counter() - start:.0f} s.\")\n",
diff --git a/docs/notebooks/benchmarks/sdba_quantile.ipynb b/docs/notebooks/benchmarks/sdba_quantile.ipynb
index e115bb8fa..a244fd2b1 100644
--- a/docs/notebooks/benchmarks/sdba_quantile.ipynb
+++ b/docs/notebooks/benchmarks/sdba_quantile.ipynb
@@ -110,7 +110,9 @@
" sdba.nbutils.quantile(da, **kws).compute()\n",
" timed[use_fnq].append([size, time.time() - t0])\n",
"\n",
- "for k, lab in zip([True, False], [\"xclim.core.utils.nan_quantile\", \"fastnanquantile\"]):\n",
+ "for k, lab in zip(\n",
+ " [True, False], [\"xclim.core.utils.nan_quantile\", \"fastnanquantile\"], strict=False\n",
+ "):\n",
" arr = np.array(timed[k])\n",
" plt.plot(arr[:, 0], arr[:, 1] / num_tests, label=lab)\n",
"plt.legend()\n",
diff --git a/docs/notebooks/ensembles.ipynb b/docs/notebooks/ensembles.ipynb
index 737d5985b..664a36f6f 100644
--- a/docs/notebooks/ensembles.ipynb
+++ b/docs/notebooks/ensembles.ipynb
@@ -433,7 +433,7 @@
"fig, ax = plt.subplots(figsize=(6, 4))\n",
"mean_delta.plot(ax=ax)\n",
"# For each flag value plot the corresponding hatch.\n",
- "for val, ha in zip(robustness.flag_values, [None, \"\\\\\\\\\\\\\", \"xxx\"]):\n",
+ "for val, ha in zip(robustness.flag_values, [None, \"\\\\\\\\\\\\\", \"xxx\"], strict=False):\n",
" ax.pcolor(\n",
" robustness.lon,\n",
" robustness.lat,\n",
@@ -445,7 +445,9 @@
"ax.legend(\n",
" handles=[\n",
" Rectangle((0, 0), 2, 2, fill=False, hatch=h, label=lbl)\n",
- " for h, lbl in zip([\"\\\\\\\\\\\\\", \"xxx\"], robustness.flag_descriptions[1:])\n",
+ " for h, lbl in zip(\n",
+ " [\"\\\\\\\\\\\\\", \"xxx\"], robustness.flag_descriptions[1:], strict=False\n",
+ " )\n",
" ],\n",
" bbox_to_anchor=(0.0, 1.1),\n",
" loc=\"upper left\",\n",
diff --git a/docs/notebooks/partitioning.ipynb b/docs/notebooks/partitioning.ipynb
index d50665004..1e4fe88cd 100644
--- a/docs/notebooks/partitioning.ipynb
+++ b/docs/notebooks/partitioning.ipynb
@@ -57,7 +57,7 @@
"scenarios = [\"ssp245\", \"ssp370\", \"ssp585\"]\n",
"\n",
"data = []\n",
- "for model, member in zip(models, members):\n",
+ "for model, member in zip(models, members, strict=False):\n",
" for scenario in scenarios:\n",
" url = host + pat.format(model=model, scenario=scenario, member=member)\n",
"\n",
diff --git a/docs/notebooks/sdba.ipynb b/docs/notebooks/sdba.ipynb
index cfce4d70b..04015c0cd 100644
--- a/docs/notebooks/sdba.ipynb
+++ b/docs/notebooks/sdba.ipynb
@@ -750,7 +750,9 @@
"outputs": [],
"source": [
"fig, axs = plt.subplots(1, 2, figsize=(16, 4))\n",
- "for da, label in zip((ref, scenh, hist), (\"Reference\", \"Adjusted\", \"Simulated\")):\n",
+ "for da, label in zip(\n",
+ " (ref, scenh, hist), (\"Reference\", \"Adjusted\", \"Simulated\"), strict=False\n",
+ "):\n",
" ds = sdba.unstack_variables(da).isel(location=2)\n",
" # time series - tasmax\n",
" ds.tasmax.plot(ax=axs[0], label=label, alpha=0.65 if label == \"Adjusted\" else 1)\n",
diff --git a/environment.yml b/environment.yml
index b6573e620..74dce6f8b 100644
--- a/environment.yml
+++ b/environment.yml
@@ -3,7 +3,7 @@ channels:
- conda-forge
- defaults
dependencies:
- - python >=3.9
+ - python >=3.10,<3.14
- boltons >=20.1
- bottleneck >=1.3.1
- cf_xarray >=0.9.3
@@ -13,13 +13,13 @@ dependencies:
- filelock >=3.14.0
- jsonpickle >=3.1.0
- numba >=0.54.1
- - numpy >=1.20.0
+ - numpy >=1.23.0
- packaging >=24.0
- pandas >=2.2.0
- pint >=0.18.0
- pyarrow >=15.0.0 # Strongly encouraged for Pandas v2.2.0+
- pyyaml >=6.0.1
- - scikit-learn >=0.21.3
+ - scikit-learn >=1.1.0
- scipy >=1.9.0
- statsmodels >=0.14.2
- xarray >=2023.11.0
@@ -34,7 +34,7 @@ dependencies:
- cairosvg
- codespell ==2.3.0
- coverage >=7.5.0
- - coveralls >=4.0.0
+ - coveralls >=4.0.1 # Note: coveralls is not yet compatible with Python 3.13
- deptry ==0.18.0
- distributed >=2.0
- flake8 >=7.1.1
@@ -43,13 +43,13 @@ dependencies:
- furo >=2023.9.10
- h5netcdf >=1.3.0
- ipykernel
- - ipython
+ - ipython >=8.5.0
- isort ==5.13.2
- - matplotlib
+ - matplotlib >=3.6.0
- mypy >=1.10.0
- nbconvert <7.14 # Pinned due to directive errors in sphinx. See: https://github.com/jupyter/nbconvert/issues/2092
- nbqa >=1.8.2
- - nbsphinx
+ - nbsphinx >=0.9.5
- nbval >=0.11.0
- nc-time-axis >=1.4.1
- netcdf4 # Required for some Jupyter notebooks
@@ -72,11 +72,11 @@ dependencies:
- sphinx-mdinclude
- sphinxcontrib-bibtex
- tokenize-rt >=5.2.0
- - tox >=4.16.0
+ - tox >=4.18.1
# - tox-conda # Will be added when a tox@v4.0+ compatible plugin is released.
- vulture ==2.11
- xdoctest >=1.1.5
- yamllint >=1.35.1
- - pip >=24.0
+ - pip >=24.2.0
- pip:
- sphinxcontrib-svg2pdfconverter
diff --git a/pyproject.toml b/pyproject.toml
index 27c7f975c..9f66f0bc0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -12,7 +12,7 @@ maintainers = [
{name = "Pascal Bourgault", email = "bourgault.pascal@ouranos.ca"}
]
readme = {file = "README.rst", content-type = "text/x-rst"}
-requires-python = ">=3.9.0"
+requires-python = ">=3.10.0"
keywords = ["xclim", "xarray", "climate", "climatology", "bias correction", "ensemble", "indicators", "analysis"]
license = {file = "LICENSE"}
classifiers = [
@@ -23,10 +23,10 @@ classifiers = [
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
+ # "Programming Language :: Python :: 3.13",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Scientific/Engineering :: Atmospheric Science",
"Topic :: Scientific/Engineering :: Hydrology",
@@ -36,21 +36,20 @@ dynamic = ["description", "version"]
dependencies = [
"boltons >=20.1",
"bottleneck >=1.3.1",
- # cf-xarray is differently named on conda-forge
- "cf-xarray >=0.9.3",
+ "cf-xarray >=0.9.3", # cf-xarray is differently named on conda-forge
"cftime >=1.4.1",
"click >=8.1",
"dask[array] >=2.6",
"filelock >=3.14.0",
"jsonpickle >=3.1.0",
"numba >=0.54.1",
- "numpy >=1.20.0",
+ "numpy >=1.23.0",
"packaging >=24.0",
"pandas >=2.2",
"pint >=0.18",
"pyarrow >=10.0.1", # Strongly encouraged for pandas v2.2.0+
"pyyaml >=6.0.1",
- "scikit-learn >=0.21.3",
+ "scikit-learn >=1.1.0",
"scipy >=1.9.0",
"statsmodels >=0.14.2",
"xarray >=2023.11.0",
@@ -65,19 +64,19 @@ dev = [
"bump-my-version ==0.26.1",
"codespell ==2.3.0",
"coverage[toml] >=7.5.0",
- "coveralls >=4.0.0",
+ "coveralls >=4.0.1", # coveralls is not yet compatible with Python 3.13
"deptry ==0.20.0",
"flake8 >=7.1.1",
"flake8-rst-docstrings >=0.3.0",
"h5netcdf>=1.3.0",
- "ipython",
+ "ipython >=8.5.0",
"isort ==5.13.2",
"mypy >=1.10.0",
"nbconvert <7.14", # Pinned due to directive errors in sphinx. See: https://github.com/jupyter/nbconvert/issues/2092
"nbqa >=1.8.2",
"nbval >=0.11.0",
"pandas-stubs >=2.2",
- "pip >=24.0",
+ "pip >=24.2.0",
"pooch >=1.8.0",
"pre-commit >=3.7",
"pylint >=3.2.4",
@@ -87,7 +86,7 @@ dev = [
"pytest-xdist[psutil] >=3.2",
"ruff >=0.5.6",
"tokenize-rt >=5.2.0",
- "tox >=4.17.0",
+ "tox >=4.18.1",
# "tox-conda", # Will be added when a tox@v4.0+ compatible plugin is released.
"vulture ==2.12",
"xdoctest >=1.1.5",
@@ -98,8 +97,8 @@ docs = [
"distributed >=2.0",
"furo >=2023.9.10",
"ipykernel",
- "matplotlib",
- "nbsphinx",
+ "matplotlib >=3.6.0",
+ "nbsphinx >=0.9.5",
"nc-time-axis >=1.4.1",
"pooch >=1.8.0",
"pybtex >=0.24.0",
@@ -224,7 +223,8 @@ exclude = [
]
[tool.mypy]
-python_version = "3.9"
+python_version = "3.10"
+show_error_codes = true
enable_error_code = ["ignore-without-code", "redundant-expr", "truthy-bool"]
plugins = ["numpy.typing.mypy_plugin"]
strict = true
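
An aside on the `[tool.mypy]` fix above: in TOML an unquoted `3.10` is a float and parses as `3.1`, so mypy's `python_version` must remain a quoted string. A quick check using only the standard library `tomllib` (available since Python 3.11):

```python
import tomllib

doc = tomllib.loads('bare = 3.10\nquoted = "3.10"')
assert doc["bare"] == 3.1        # float: the trailing zero is lost
assert doc["quoted"] == "3.10"   # string: preserved exactly
print(doc)  # {'bare': 3.1, 'quoted': '3.10'}
```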
diff --git a/tests/test_cffwis.py b/tests/test_cffwis.py
index d17ae4b8e..fd7cddad7 100644
--- a/tests/test_cffwis.py
+++ b/tests/test_cffwis.py
@@ -418,7 +418,9 @@ def test_gfwed_and_indicators(self, open_dataset):
temp_end_thresh="6 degC",
)
- for exp, out in zip([ds.DC, ds.DMC, ds.FFMC, ds.ISI, ds.BUI, ds.FWI], outs):
+ for exp, out in zip(
+ [ds.DC, ds.DMC, ds.FFMC, ds.ISI, ds.BUI, ds.FWI], outs, strict=False
+ ):
np.testing.assert_allclose(
out.isel(loc=[0, 1]), exp.isel(loc=[0, 1]), rtol=0.03
)
@@ -455,7 +457,7 @@ def test_gfwed_and_indicators(self, open_dataset):
)
for exp, out in zip(
- [ds2.DC, ds2.DMC, ds2.FFMC, ds2.ISI, ds2.BUI, ds2.FWI], outs
+ [ds2.DC, ds2.DMC, ds2.FFMC, ds2.ISI, ds2.BUI, ds2.FWI], outs, strict=False
):
np.testing.assert_allclose(out, exp, rtol=0.03)
diff --git a/tests/test_cli.py b/tests/test_cli.py
index ee87df320..7505e3f4e 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -278,7 +278,9 @@ def test_suspicious_precipitation_flags(pr_series, tmp_path):
@pytest.mark.slow
def test_dataflags_output(tmp_path, tas_series, tasmax_series, tasmin_series):
ds = xr.Dataset()
- for series, val in zip([tas_series, tasmax_series, tasmin_series], [0, 10, -10]):
+ for series, val in zip(
+ [tas_series, tasmax_series, tasmin_series], [0, 10, -10], strict=False
+ ):
vals = val + K2C + np.sin(np.pi * np.arange(366 * 3) / 366)
arr = series(vals, start="1971-01-01")
ds = xr.merge([ds, arr])
diff --git a/tests/test_ensembles.py b/tests/test_ensembles.py
index 896340634..abd8e25f3 100644
--- a/tests/test_ensembles.py
+++ b/tests/test_ensembles.py
@@ -60,7 +60,7 @@ def test_create_ensemble(self, open_dataset, ensemble_dataset_objects, nimbus):
# Kinda a hack? Alternative is to open and rewrite in a temp folder.
files = [nimbus.fetch(f) for f in ensemble_dataset_objects["nc_files_simple"]]
- ens2 = ensembles.create_ensemble(dict(zip(reals, files)))
+ ens2 = ensembles.create_ensemble(dict(zip(reals, files, strict=False)))
xr.testing.assert_identical(ens1, ens2)
def test_no_time(self, tmp_path, ensemble_dataset_objects, open_dataset):
diff --git a/tests/test_flags.py b/tests/test_flags.py
index 4560ecc88..75c729f5c 100644
--- a/tests/test_flags.py
+++ b/tests/test_flags.py
@@ -23,7 +23,7 @@ def test_tas_temperature_flags(
):
ds = xr.Dataset()
for series, val in zip(
- [tas_series, tasmax_series, tasmin_series], [0, 10, -10]
+ [tas_series, tasmax_series, tasmin_series], [0, 10, -10], strict=False
):
vals = val + K2C + np.sin(2 * np.pi * np.arange(366 * 3) / 366)
arr = series(vals, start="1971-01-01")
@@ -84,7 +84,7 @@ def test_suspicious_pr_data(self, pr_series):
def test_suspicious_tas_data(self, tas_series, tasmax_series, tasmin_series):
bad_ds = xr.Dataset()
for series, val in zip(
- [tas_series, tasmax_series, tasmin_series], [0, 10, -10]
+ [tas_series, tasmax_series, tasmin_series], [0, 10, -10], strict=False
):
vals = val + K2C + np.sin(2 * np.pi * np.arange(366 * 7) / 366)
arr = series(vals, start="1971-01-01")
@@ -118,7 +118,7 @@ def test_suspicious_tas_data(self, tas_series, tasmax_series, tasmin_series):
def test_raises(self, tasmax_series, tasmin_series):
bad_ds = xr.Dataset()
- for series, val in zip([tasmax_series, tasmin_series], [10, -10]):
+ for series, val in zip([tasmax_series, tasmin_series], [10, -10], strict=False):
vals = val + K2C + np.sin(2 * np.pi * np.arange(366 * 3) / 366)
arr = series(vals, start="1971-01-01")
bad_ds = xr.merge([bad_ds, arr])
diff --git a/tests/test_indicators.py b/tests/test_indicators.py
index 29aa1a988..04685218d 100644
--- a/tests/test_indicators.py
+++ b/tests/test_indicators.py
@@ -5,7 +5,6 @@
import gc
import json
from inspect import signature
-from typing import Union
import dask
import numpy as np
@@ -508,7 +507,7 @@ def test_signature():
"ds",
"indexer",
]
- assert sig.parameters["pr"].annotation == Union[xr.DataArray, str]
+ assert sig.parameters["pr"].annotation == xr.DataArray | str
assert sig.parameters["tas"].default == "tas"
assert sig.parameters["tas"].kind == sig.parameters["tas"].POSITIONAL_OR_KEYWORD
assert sig.parameters["thresh"].kind == sig.parameters["thresh"].KEYWORD_ONLY
diff --git a/tests/test_locales.py b/tests/test_locales.py
index 41707ddc2..24187dc5a 100644
--- a/tests/test_locales.py
+++ b/tests/test_locales.py
@@ -143,7 +143,7 @@ def test_xclim_translations(locale, official_indicators):
continue
# Both global attrs are present
is_complete = {"title", "abstract"}.issubset(set(trans))
- for _attrs, transattrs in zip(indcls.cf_attrs, trans["cf_attrs"]):
+ for _attrs, transattrs in zip(indcls.cf_attrs, trans["cf_attrs"], strict=False):
if {"long_name", "description"} - set(transattrs.keys()):
is_complete = False
diff --git a/tox.ini b/tox.ini
index d59e867cd..6a5320f62 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,29 +1,30 @@
[tox]
-min_version = 4.16.0
+min_version = 4.18.1
env_list =
lint
docs
notebooks
doctests
- py39
- py310-extras-numpy
- py311-extras-sbck
- py312-extras-lmoments
+ py310
+ py311-extras-numpy
+ py312-extras-sbck
+ py313-extras-lmoments
labels =
static = lint
- test = py39, py310-extras-numpy, py311-extras-sbck, py312-extras-lmoments
+ test = py310, py311-extras-numpy, py312-extras-sbck, py313-extras-lmoments
special = docs, notebooks, doctests
requires =
- pip >= 24.0
- flit >=3.9
+ pip >= 24.2.0
+ flit >=3.9,<4.0
opts = -vv
[gh]
python =
- 3.9 = py39-coverage
- 3.10 = py310-coverage-extras-numpy
+ 3.10 = py310-coverage-lmoments
3.11 = py311-coverage-extras-sbck
- 3.12 = py312-coverage-extras-lmoments
+ 3.12 = py312-coverage-extras-numpy
+ # coveralls is not yet supported for py313
+ 3.13 = py313-coverage-extras-lmoments
[testenv:lint]
description = Run code quality compliance tests under {basepython}
@@ -115,14 +116,14 @@ extras =
extras: extras
deps =
lmoments: lmoments3
- numpy: numpy>=1.20,<2.0
+ numpy: numpy>=1.23,<2.0
numpy: pint>=0.18,<0.24.0
- sbck: pybind11
upstream: -r CI/requirements_upstream.txt
install_command = python -m pip install --no-user {opts} {packages}
download = True
commands_pre =
sbck: python -c 'print("The sbck dependency requires the \"libeigen3-dev\" package to be installed on the system.")'
+ sbck: python -m pip install pybind11
sbck: python -m pip install sbck
python -m pip list
xclim show_version_info
diff --git a/xclim/core/bootstrapping.py b/xclim/core/bootstrapping.py
index d3dde697f..6e4897d6d 100644
--- a/xclim/core/bootstrapping.py
+++ b/xclim/core/bootstrapping.py
@@ -3,8 +3,9 @@
from __future__ import annotations
import warnings
+from collections.abc import Callable
from inspect import signature
-from typing import Any, Callable
+from typing import Any
import cftime
import numpy as np
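
The `typing.Callable` import moving to `collections.abc` here (and in the modules below) follows PEP 585, which deprecated the `typing` aliases for `collections.abc` generics as of Python 3.9. A minimal sketch:

```python
from collections.abc import Callable


def apply_twice(func: Callable[[int], int], value: int) -> int:
    # `collections.abc.Callable` has been subscriptable in annotations
    # since Python 3.9; the `typing.Callable` alias is deprecated.
    return func(func(value))


assert apply_twice(lambda x: x + 1, 0) == 2
```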
diff --git a/xclim/core/calendar.py b/xclim/core/calendar.py
index 78c8a0460..1af11f5de 100644
--- a/xclim/core/calendar.py
+++ b/xclim/core/calendar.py
@@ -122,7 +122,7 @@ def get_calendar(obj: Any, dim: str = "time") -> str:
The Climate and Forecasting (CF) calendar name.
Will always return "standard" instead of "gregorian", following CF conventions 1.9.
"""
- if isinstance(obj, (xr.DataArray, xr.Dataset)):
+ if isinstance(obj, xr.DataArray | xr.Dataset):
return obj[dim].dt.calendar
if isinstance(obj, xr.CFTimeIndex):
obj = obj.values[0]
@@ -769,9 +769,9 @@ def time_bnds( # noqa: C901
So "2000-01-31 00:00:00" with a "3h" frequency, means a period going from "2000-01-31 00:00:00" to
"2000-01-31 02:59:59.999999".
"""
- if isinstance(time, (xr.DataArray, xr.Dataset)):
+ if isinstance(time, xr.DataArray | xr.Dataset):
time = time.indexes[time.name]
- elif isinstance(time, (DataArrayResample, DatasetResample)):
+ elif isinstance(time, DataArrayResample | DatasetResample):
for grouper in time.groupers:
if isinstance(grouper.grouper, xr.groupers.TimeResampler):
datetime = grouper.unique_coord.data
@@ -1519,7 +1519,9 @@ def _reconstruct_time(_time_as_delta, _start):
if window == stride:
# just concat them all
periods = []
- for i, (start, length) in enumerate(zip(starts.values, lengths.values)):
+ for i, (start, length) in enumerate(
+ zip(starts.values, lengths.values, strict=False)
+ ):
real_time = _reconstruct_time(time_as_delta, start)
periods.append(
da.isel(**{dim: i}, drop=True)
@@ -1543,7 +1545,9 @@ def _reconstruct_time(_time_as_delta, _start):
strd_frq = construct_offset(mult * stride, *args)
periods = []
- for i, (start, length) in enumerate(zip(starts.values, lengths.values)):
+ for i, (start, length) in enumerate(
+ zip(starts.values, lengths.values, strict=False)
+ ):
real_time = _reconstruct_time(time_as_delta, start)
slices = list(real_time.resample(time=strd_frq).groups.values())
if i == 0:
diff --git a/xclim/core/formatting.py b/xclim/core/formatting.py
index aaa18025b..45e545090 100644
--- a/xclim/core/formatting.py
+++ b/xclim/core/formatting.py
@@ -11,10 +11,10 @@
import string
import warnings
from ast import literal_eval
-from collections.abc import Sequence
+from collections.abc import Callable, Sequence
from fnmatch import fnmatch
from inspect import _empty, signature # noqa
-from typing import Any, Callable
+from typing import Any
import xarray as xr
from boltons.funcutils import wraps
@@ -425,7 +425,7 @@ def _call_and_add_history(*args, **kwargs):
else:
out = outs
- if not isinstance(out, (xr.DataArray, xr.Dataset)):
+ if not isinstance(out, xr.DataArray | xr.Dataset):
raise TypeError(
f"Decorated `update_xclim_history` received a non-xarray output from {func.__name__}."
)
@@ -473,11 +473,11 @@ def gen_call_string(funcname: str, *args, **kwargs) -> str:
"func(A, b=2.0, c='3', d=)"
"""
elements = []
- chain = itertools.chain(zip([None] * len(args), args), kwargs.items())
+ chain = itertools.chain(zip([None] * len(args), args, strict=False), kwargs.items())
for name, val in chain:
if isinstance(val, xr.DataArray):
 rep = val.name or "<array>"
- elif isinstance(val, (int, float, str, bool)) or val is None:
+ elif isinstance(val, int | float | str | bool) or val is None:
rep = repr(val)
else:
rep = repr(val)
diff --git a/xclim/core/indicator.py b/xclim/core/indicator.py
index dff9f6eab..7c1f0ddac 100644
--- a/xclim/core/indicator.py
+++ b/xclim/core/indicator.py
@@ -105,7 +105,7 @@
import warnings
import weakref
from collections import OrderedDict, defaultdict
-from collections.abc import Sequence
+from collections.abc import Callable, Sequence
from copy import deepcopy
from dataclasses import asdict, dataclass
from functools import reduce
@@ -115,7 +115,7 @@
from os import PathLike
from pathlib import Path
from types import ModuleType
-from typing import Any, Callable, Optional, Union
+from typing import Any
import numpy as np
import xarray
@@ -636,7 +636,7 @@ def _parse_output_attrs( # noqa: C901
n_outs = len(parent_cf_attrs) if parent_cf_attrs is not None else 1
for name in cls._cf_names:
arg = kwds.get(name)
- if isinstance(arg, (tuple, list)):
+ if isinstance(arg, tuple | list):
n_outs = len(arg)
# Populate new cf_attrs from parsing cf_names passed directly.
@@ -645,14 +645,14 @@ def _parse_output_attrs( # noqa: C901
values = kwds.pop(name, None)
if values is None: # None passed, skip
continue
- if not isinstance(values, (tuple, list)):
+ if not isinstance(values, tuple | list):
# a single string or callable, same for all outputs
values = [values] * n_outs
elif len(values) != n_outs: # A sequence of the wrong length.
raise ValueError(
f"Attribute {name} has {len(values)} elements but xclim expected {n_outs}."
)
- for attrs, value in zip(cf_attrs, values):
+ for attrs, value in zip(cf_attrs, values, strict=False):
if value: # Skip the empty ones (None or "")
attrs[name] = value
# else we assume a list of dicts
@@ -663,7 +663,7 @@ def _parse_output_attrs( # noqa: C901
# update from parent, if they have the same length.
if parent_cf_attrs is not None and len(parent_cf_attrs) == len(cf_attrs):
- for old, new in zip(parent_cf_attrs, cf_attrs):
+ for old, new in zip(parent_cf_attrs, cf_attrs, strict=False):
for attr, value in old.items():
new.setdefault(attr, value)
@@ -756,9 +756,9 @@ def _gen_signature(self):
InputKind.VARIABLE,
InputKind.OPTIONAL_VARIABLE,
]:
- annot = Union[DataArray, str]
+ annot = DataArray | str
if meta.kind == InputKind.OPTIONAL_VARIABLE:
- annot = Optional[annot]
+ annot = annot | None
variables.append(
_Parameter(
name,
@@ -832,7 +832,7 @@ def __call__(self, *args, **kwds):
# Metadata attributes from templates
var_id = None
- for out, attrs, base_attrs in zip(outs, out_attrs, self.cf_attrs):
+ for out, attrs, base_attrs in zip(outs, out_attrs, self.cf_attrs, strict=False):
if self.n_outs > 1:
var_id = base_attrs["var_name"]
attrs.update(units=out.units)
@@ -849,13 +849,13 @@ def __call__(self, *args, **kwds):
# Convert to output units
outs = [
convert_units_to(out, attrs["units"], self.context)
- for out, attrs in zip(outs, out_attrs)
+ for out, attrs in zip(outs, out_attrs, strict=False)
]
outs = self._postprocess(outs, das, params)
# Update variable attributes
- for out, attrs in zip(outs, out_attrs):
+ for out, attrs in zip(outs, out_attrs, strict=False):
var_name = attrs.pop("var_name")
out.attrs.update(attrs)
out.name = var_name
@@ -1232,7 +1232,7 @@ def _format(
for k, v in args.items():
if isinstance(v, units.Quantity):
mba[k] = f"{v:g~P}"
- elif isinstance(v, (int, float)):
+ elif isinstance(v, int | float):
mba[k] = f"{v:g}"
# TODO: What about InputKind.NUMBER_SEQUENCE
elif k == "indexer":
@@ -1762,7 +1762,7 @@ def build_indicator_module_from_yaml( # noqa: C901
# No suffix means we try to automatically detect the python file
indices = indfile
- if isinstance(indices, (str, Path)):
+ if isinstance(indices, str | Path):
indices = load_module(indices, name=module_name)
_translations: dict[str, dict] = {}
@@ -1778,7 +1778,7 @@ def build_indicator_module_from_yaml( # noqa: C901
_translations = {
lng: (
read_locale_file(trans, module=module_name, encoding=encoding)
- if isinstance(trans, (str, Path))
+ if isinstance(trans, str | Path)
else trans
)
for lng, trans in translations.items()
diff --git a/xclim/core/locales.py b/xclim/core/locales.py
index e65592dc7..f68edff1f 100644
--- a/xclim/core/locales.py
+++ b/xclim/core/locales.py
@@ -265,7 +265,7 @@ def load_locale(locdata: str | Path | dict[str, dict], locale: str):
locale : str
The locale name (IETF tag).
"""
- if isinstance(locdata, (str, Path)):
+ if isinstance(locdata, str | Path):
filename = Path(locdata)
locdata = read_locale_file(filename)
diff --git a/xclim/core/options.py b/xclim/core/options.py
index 1034ea584..90896f5d8 100644
--- a/xclim/core/options.py
+++ b/xclim/core/options.py
@@ -7,8 +7,8 @@
from __future__ import annotations
+from collections.abc import Callable
from inspect import signature
-from typing import Callable
from boltons.funcutils import wraps
diff --git a/xclim/core/units.py b/xclim/core/units.py
index bf494763c..0e393079d 100644
--- a/xclim/core/units.py
+++ b/xclim/core/units.py
@@ -10,10 +10,11 @@
import logging
import warnings
+from collections.abc import Callable
from copy import deepcopy
from importlib.resources import files
from inspect import signature
-from typing import Any, Callable, Literal, cast
+from typing import Any, Literal, cast
import cf_xarray.units
import numpy as np
@@ -189,7 +190,7 @@ def pint2cfunits(value: units.Quantity | units.Unit) -> str:
str
Units following CF-Convention, using symbols.
"""
- if isinstance(value, (pint.Quantity, units.Quantity)):
+ if isinstance(value, pint.Quantity | units.Quantity):
value = value.units
# Force "1" if the formatted string is "" (pint < 0.24)
@@ -300,7 +301,7 @@ def convert_units_to( # noqa: C901
# Target units
if isinstance(target, units.Unit):
target_unit = target
- elif isinstance(target, (str, xr.DataArray)):
+ elif isinstance(target, str | xr.DataArray):
target_unit = units2pint(target)
else:
raise NotImplementedError(
@@ -372,7 +373,7 @@ def convert_units_to( # noqa: C901
return out
# TODO remove backwards compatibility of int/float thresholds after v1.0 release
- if isinstance(source, (float, int)):
+ if isinstance(source, float | int):
raise TypeError("Please specify units explicitly.")
raise NotImplementedError(f"Source of type `{type(source)}` is not supported.")
@@ -1115,7 +1116,7 @@ def check_units(
)
val = str(val).replace("UNSET ", "")
- if isinstance(val, (int, float)):
+ if isinstance(val, int | float):
raise TypeError("Please set units explicitly using a string.")
try:
diff --git a/xclim/core/utils.py b/xclim/core/utils.py
index cee032de8..fcca4c3ef 100644
--- a/xclim/core/utils.py
+++ b/xclim/core/utils.py
@@ -12,12 +12,11 @@
import logging
import os
import warnings
-from collections.abc import Sequence
+from collections.abc import Callable, Sequence
from enum import IntEnum
from inspect import _empty # noqa
from io import StringIO
from pathlib import Path
-from typing import Callable
import numpy as np
import xarray as xr
@@ -127,7 +126,7 @@ def ensure_chunk_size(da: xr.DataArray, **minchunks: int) -> xr.DataArray:
if not uses_dask(da):
return da
- all_chunks = dict(zip(da.dims, da.chunks))
+ all_chunks = dict(zip(da.dims, da.chunks, strict=False))
chunking = {}
for dim, minchunk in minchunks.items():
chunks = all_chunks[dim]
@@ -753,7 +752,7 @@ def _chunk_like(*inputs: xr.DataArray | xr.Dataset, chunks: dict[str, int] | Non
da.variable, xr.core.variable.IndexVariable
):
da = xr.DataArray(da, dims=da.dims, coords=da.coords, name=da.name)
- if not isinstance(da, (xr.DataArray, xr.Dataset)):
+ if not isinstance(da, xr.DataArray | xr.Dataset):
outputs.append(da)
else:
outputs.append(
diff --git a/xclim/ensembles/_base.py b/xclim/ensembles/_base.py
index 2514cb98f..36c5f9560 100644
--- a/xclim/ensembles/_base.py
+++ b/xclim/ensembles/_base.py
@@ -105,7 +105,7 @@ def create_ensemble(
"""
if isinstance(datasets, dict):
if realizations is None:
- realizations, datasets = zip(*datasets.items())
+ realizations, datasets = zip(*datasets.items(), strict=False)
else:
datasets = datasets.values()
elif isinstance(datasets, str) and realizations is not None:
diff --git a/xclim/ensembles/_robustness.py b/xclim/ensembles/_robustness.py
index fcfc81893..cfa9f1061 100644
--- a/xclim/ensembles/_robustness.py
+++ b/xclim/ensembles/_robustness.py
@@ -357,7 +357,7 @@ def robustness_categories(
robustness = (changed.copy() * 0).astype(int) + 99
# We go in reverse gear so that the first categories have precedence in the case of multiple matches.
for i, ((chg_op, agr_op), (chg_thresh, agr_thresh)) in reversed(
- list(enumerate(zip(ops, thresholds), 1))
+ list(enumerate(zip(ops, thresholds, strict=False), 1))
):
if not agr_op:
cond = compare(changed, chg_op, chg_thresh)
diff --git a/xclim/indices/_agro.py b/xclim/indices/_agro.py
index 1aa06b71f..38f939c29 100644
--- a/xclim/indices/_agro.py
+++ b/xclim/indices/_agro.py
@@ -415,7 +415,7 @@ def biologically_effective_degree_days(
lat = _gather_lat(tasmin)
if method.lower() == "gladstones":
- if isinstance(lat, (int, float)):
+ if isinstance(lat, int | float):
lat = xarray.DataArray(lat)
lat_mask = abs(lat) <= 50
k = 1 + xarray.where(
diff --git a/xclim/indices/_anuclim.py b/xclim/indices/_anuclim.py
index 463eea46a..14ef28afb 100644
--- a/xclim/indices/_anuclim.py
+++ b/xclim/indices/_anuclim.py
@@ -1,7 +1,8 @@
# noqa: D100
from __future__ import annotations
-from typing import Callable, cast
+from collections.abc import Callable
+from typing import cast
import numpy as np
import xarray
diff --git a/xclim/indices/_multivariate.py b/xclim/indices/_multivariate.py
index f0eec04d0..f10e6fae6 100644
--- a/xclim/indices/_multivariate.py
+++ b/xclim/indices/_multivariate.py
@@ -1,7 +1,8 @@
# noqa: D100
from __future__ import annotations
-from typing import Callable, cast
+from collections.abc import Callable
+from typing import cast
import numpy as np
import xarray
diff --git a/xclim/indices/fire/_cffwis.py b/xclim/indices/fire/_cffwis.py
index a770dbca9..6c3a74b4b 100644
--- a/xclim/indices/fire/_cffwis.py
+++ b/xclim/indices/fire/_cffwis.py
@@ -1172,7 +1172,7 @@ def fire_weather_ufunc( # noqa: C901
if len(outputs) == 1:
return {outputs[0]: das}
- return dict(zip(outputs, das))
+ return dict(zip(outputs, das, strict=False))
@declare_units(last_dc="[]", winter_pr="[length]")
diff --git a/xclim/indices/generic.py b/xclim/indices/generic.py
index 43bc85720..bc3d367de 100644
--- a/xclim/indices/generic.py
+++ b/xclim/indices/generic.py
@@ -8,8 +8,7 @@
from __future__ import annotations
import warnings
-from collections.abc import Sequence
-from typing import Callable
+from collections.abc import Callable, Sequence
import cftime
import numpy as np
@@ -214,7 +213,7 @@ def get_op(op: str, constrain: Sequence[str] | None = None) -> Callable:
raise ValueError(f"Operation `{op}` not recognized.")
constraints = []
- if isinstance(constrain, (list, tuple, set)):
+ if isinstance(constrain, list | tuple | set):
constraints.extend([binary_ops[c] for c in constrain])
constraints.extend(constrain)
elif isinstance(constrain, str):
diff --git a/xclim/indices/run_length.py b/xclim/indices/run_length.py
index 1c9e170ab..3332343b1 100644
--- a/xclim/indices/run_length.py
+++ b/xclim/indices/run_length.py
@@ -1633,7 +1633,7 @@ def suspicious_run_1d(
raise NotImplementedError(f"{op}")
out = np.zeros_like(arr, dtype=bool)
- for st, l in zip(pos[sus_runs], rl[sus_runs]): # noqa: E741
+ for st, l in zip(pos[sus_runs], rl[sus_runs], strict=False): # noqa: E741
out[st : st + l] = True # noqa: E741
return out
diff --git a/xclim/sdba/_adjustment.py b/xclim/sdba/_adjustment.py
index 50ba1f5f7..431df0195 100644
--- a/xclim/sdba/_adjustment.py
+++ b/xclim/sdba/_adjustment.py
@@ -8,8 +8,7 @@
from __future__ import annotations
-from collections.abc import Sequence
-from typing import Callable
+from collections.abc import Callable, Sequence
import numpy as np
import xarray as xr
@@ -993,7 +992,7 @@ def _otc_adjust(
for k, v in bin_width.items():
_bin_width[k] = v
bin_width = _bin_width
- elif isinstance(bin_width, (float, int)):
+ elif isinstance(bin_width, float | int):
bin_width = np.ones(X.shape[1]) * bin_width
if bin_origin is None:
@@ -1004,7 +1003,7 @@ def _otc_adjust(
for v, k in bin_origin.items():
_bin_origin[v] = k
bin_origin = _bin_origin
- elif isinstance(bin_origin, (float, int)):
+ elif isinstance(bin_origin, float | int):
bin_origin = np.ones(X.shape[1]) * bin_origin
num_iter_max = 100_000_000 if num_iter_max is None else num_iter_max
@@ -1196,7 +1195,7 @@ def _dotc_adjust(
for v, k in bin_width.items():
_bin_width[v] = k
bin_width = _bin_width
- elif isinstance(bin_width, (float, int)):
+ elif isinstance(bin_width, float | int):
bin_width = np.ones(X0.shape[1]) * bin_width
if isinstance(bin_origin, dict):
@@ -1204,7 +1203,7 @@ def _dotc_adjust(
for v, k in bin_origin.items():
_bin_origin[v] = k
bin_origin = _bin_origin
- elif isinstance(bin_origin, (float, int)):
+ elif isinstance(bin_origin, float | int):
bin_origin = np.ones(X0.shape[1]) * bin_origin
# Map ref to hist
diff --git a/xclim/sdba/base.py b/xclim/sdba/base.py
index 0f0be0dd6..5036058ef 100644
--- a/xclim/sdba/base.py
+++ b/xclim/sdba/base.py
@@ -5,9 +5,8 @@
from __future__ import annotations
-from collections.abc import Sequence
+from collections.abc import Callable, Sequence
from inspect import _empty, signature # noqa
-from typing import Callable
import dask.array as dsk
import jsonpickle
@@ -375,7 +374,7 @@ def apply(
function may add a "_group_apply_reshape" attribute set to `True` on the variables that should be reduced and
these will be re-grouped by calling `da.groupby(self.name).first()`.
"""
- if isinstance(da, (dict, xr.Dataset)):
+ if isinstance(da, dict | xr.Dataset):
grpd = self.group(main_only=main_only, **da)
dim_chunks = min( # Get smallest chunking to rechunk if the operation is non-grouping
[
@@ -601,7 +600,7 @@ def _map_blocks(ds, **kwargs): # noqa: C901
chunks = (
dict(ds.chunks)
if isinstance(ds, xr.Dataset)
- else dict(zip(ds.dims, ds.chunks))
+ else dict(zip(ds.dims, ds.chunks, strict=False))
)
badchunks = {}
if group is not None:
diff --git a/xclim/sdba/loess.py b/xclim/sdba/loess.py
index f20974efd..8cd143642 100644
--- a/xclim/sdba/loess.py
+++ b/xclim/sdba/loess.py
@@ -5,7 +5,7 @@
from __future__ import annotations
-from typing import Callable
+from collections.abc import Callable
from warnings import warn
import numba
diff --git a/xclim/sdba/processing.py b/xclim/sdba/processing.py
index 77c8422a7..af29e17eb 100644
--- a/xclim/sdba/processing.py
+++ b/xclim/sdba/processing.py
@@ -824,7 +824,7 @@ def unstack_variables(da: xr.DataArray, dim: str | None = None) -> xr.Dataset:
for name, attr_list in da[dim].attrs.items():
if not name.startswith("_"):
continue
- for attr, var in zip(attr_list, da[dim]):
+ for attr, var in zip(attr_list, da[dim], strict=False):
if attr is not None:
ds[var.item()].attrs[name[1:]] = attr
diff --git a/xclim/sdba/properties.py b/xclim/sdba/properties.py
index f5f61b1af..02232b6cb 100644
--- a/xclim/sdba/properties.py
+++ b/xclim/sdba/properties.py
@@ -912,7 +912,9 @@ def _bivariate_spell_stats(
conds = []
masks = []
- for da, thresh, op, method in zip([ds.da1, ds.da2], threshs, opss, methods):
+ for da, thresh, op, method in zip(
+ [ds.da1, ds.da2], threshs, opss, methods, strict=False
+ ):
masks.append(
~(da.isel({dim: 0}).isnull()).drop_vars(dim)
) # mask of the ocean with NaNs
diff --git a/xclim/sdba/utils.py b/xclim/sdba/utils.py
index 6b071fdbe..12af93139 100644
--- a/xclim/sdba/utils.py
+++ b/xclim/sdba/utils.py
@@ -6,7 +6,7 @@
from __future__ import annotations
import itertools
-from typing import Callable
+from collections.abc import Callable
from warnings import warn
import bottleneck as bn
@@ -757,7 +757,7 @@ def get_clusters_1d(
cl_maxval = []
cl_start = []
cl_end = []
- for start, end in zip(starts, ends):
+ for start, end in zip(starts, ends, strict=False):
cluster_max = data[start:end].max()
if cluster_max > u1:
cl_maxval.append(cluster_max)
diff --git a/xclim/testing/sdba_utils.py b/xclim/testing/sdba_utils.py
index e31306899..88353af5b 100644
--- a/xclim/testing/sdba_utils.py
+++ b/xclim/testing/sdba_utils.py
@@ -20,7 +20,7 @@
def series(values, name, start="2000-01-01"):
"""Create a DataArray with time, lon and lat dimensions."""
coords = collections.OrderedDict()
- for dim, n in zip(("time", "lon", "lat"), values.shape):
+ for dim, n in zip(("time", "lon", "lat"), values.shape, strict=False):
if dim == "time":
coords[dim] = pd.date_range(start, periods=n, freq="D")
else:
diff --git a/xclim/testing/utils.py b/xclim/testing/utils.py
index 12b91a0c9..e710f9314 100644
--- a/xclim/testing/utils.py
+++ b/xclim/testing/utils.py
@@ -223,7 +223,7 @@ def publish_release_notes(
-----
This function is used solely for development and packaging purposes.
"""
- if isinstance(changes, (str, Path)):
+ if isinstance(changes, str | Path):
changes_file = Path(changes).absolute()
else:
changes_file = Path(__file__).absolute().parents[2].joinpath("CHANGELOG.rst")
@@ -276,7 +276,7 @@ def publish_release_notes(
if not file:
return changes
- if isinstance(file, (Path, os.PathLike)):
+ if isinstance(file, Path | os.PathLike):
with open(file, "w", encoding="utf-8") as f:
print(changes, file=f)
else:
@@ -362,7 +362,7 @@ def show_versions(
if not file:
return message
- if isinstance(file, (Path, os.PathLike)):
+ if isinstance(file, Path | os.PathLike):
with open(file, "w", encoding="utf-8") as f:
print(message, file=f)
else: