diff --git a/.github/ci-hpc-config.yml b/.github/ci-hpc-config.yml index 5996da1..e425cde 100644 --- a/.github/ci-hpc-config.yml +++ b/.github/ci-hpc-config.yml @@ -1,14 +1,14 @@ build: python: '3.10' modules: - - ninja + - ninja dependencies: - - ecmwf/ecbuild@develop - - ecmwf/eccodes@develop + - ecmwf/ecbuild@develop + - ecmwf/eccodes@develop python_dependencies: - - ecmwf/eccodes-python@develop + - ecmwf/eccodes-python@develop env: - - ECCODES_SAMPLES_PATH=$ECCODES_DIR/share/eccodes/samples - - ECCODES_DEFINITION_PATH=$ECCODES_DIR/share/eccodes/definitions + - ECCODES_SAMPLES_PATH=$ECCODES_DIR/share/eccodes/samples + - ECCODES_DEFINITION_PATH=$ECCODES_DIR/share/eccodes/definitions parallel: 64 requirements: tests/downstream-ci-requirements.txt diff --git a/.github/workflows/cd-pypi.yml b/.github/workflows/cd-pypi.yml new file mode 100644 index 0000000..6104026 --- /dev/null +++ b/.github/workflows/cd-pypi.yml @@ -0,0 +1,11 @@ +name: cd + +on: + push: + tags: + - '**' + +jobs: + pypi: + uses: ecmwf-actions/reusable-workflows/.github/workflows/cd-pypi.yml@v2 + secrets: inherit diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8588c67..621ee74 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,24 +1,24 @@ name: ci - + on: # Trigger the workflow on push to master or develop, except tag creation push: branches: - - 'master' - - 'develop' + - 'main' + - 'develop' tags-ignore: - - '**' - + - '**' + # Trigger the workflow on pull request - pull_request: ~ - + pull_request: + # Trigger the workflow manually - workflow_dispatch: ~ - + workflow_dispatch: + # Trigger after public PR approved for CI pull_request_target: types: [labeled] - + jobs: # Run CI including downstream packages on self-hosted runners downstream-ci: @@ -29,25 +29,8 @@ jobs: pdbufr: ecmwf/pdbufr@${{ github.event.pull_request.head.sha || github.sha }} codecov_upload: true secrets: inherit - - # # Run CI of private downstream packages on self-hosted runners - # private-downstream-ci: - # name: private-downstream-ci - # needs: [downstream-ci] - # if: (success() || failure()) && ${{ !github.event.pull_request.head.repo.fork && github.event.action != 'labeled' || github.event.label.name == 'approved-for-ci' }} - # runs-on: ubuntu-latest - # permissions: - # pull-requests: write - # steps: - # - name: Dispatch private downstream CI - # uses: ecmwf-actions/dispatch-private-downstream-ci@v1 - # with: - # token: ${{ secrets.GH_REPO_READ_TOKEN }} - # owner: ecmwf-actions - # repository: private-downstream-ci - # event_type: downstream-ci - # payload: '{"metkit": "ecmwf/metkit@${{ github.event.pull_request.head.sha || github.sha }}"}' - + + # Build downstream packages on HPC downstream-ci-hpc: name: downstream-ci-hpc @@ -56,21 +39,3 @@ jobs: with: pdbufr: ecmwf/pdbufr@${{ github.event.pull_request.head.sha || github.sha }} secrets: inherit - - # # Run CI of private downstream packages on HPC - # private-downstream-ci-hpc: - # name: private-downstream-ci-hpc - # needs: [downstream-ci-hpc] - # if: (success() || failure()) && ${{ !github.event.pull_request.head.repo.fork && github.event.action != 'labeled' || github.event.label.name == 'approved-for-ci' }} - # runs-on: ubuntu-latest - # permissions: - # pull-requests: write - # steps: - # - name: Dispatch private downstream CI - # uses: ecmwf-actions/dispatch-private-downstream-ci@v1 - # with: - # token: ${{ secrets.GH_REPO_READ_TOKEN }} - # owner: ecmwf-actions - # repository: private-downstream-ci - # event_type: downstream-ci-hpc - # payload: 
'{"metkit": "ecmwf/metkit@${{ github.event.pull_request.head.sha || github.sha }}"}' \ No newline at end of file diff --git a/.github/workflows/label-public-pr-yml b/.github/workflows/label-public-pr-yml index bda6c82..59b2bfa 100644 --- a/.github/workflows/label-public-pr-yml +++ b/.github/workflows/label-public-pr-yml @@ -1,10 +1,10 @@ # Manage labels of pull requests that originate from forks name: label-public-pr - + on: pull_request_target: types: [opened, synchronize] - + jobs: label: uses: ecmwf-actions/reusable-workflows/.github/workflows/label-pr.yml@v2 diff --git a/.github/workflows/legacy-ci.yml b/.github/workflows/legacy-ci.yml new file mode 100644 index 0000000..2aa722e --- /dev/null +++ b/.github/workflows/legacy-ci.yml @@ -0,0 +1,207 @@ +name: legacy-ci + +on: + push: + branches: + - main + - develop + tags: + - "*" + pull_request: + branches: + - main + - develop + pull_request_target: + types: [labeled] + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +defaults: + run: + shell: bash -l {0} + +jobs: + pre-commit: + if: ${{ !github.event.pull_request.head.repo.fork && github.event.action != 'labeled' || github.event.label.name == 'approved-for-ci' }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + ref: ${{ github.event.pull_request.head.sha || github.ref }} + - uses: actions/setup-python@v4 + with: + python-version: 3.x + - uses: pre-commit/action@v3.0.0 + + + documentation: + if: ${{ !github.event.pull_request.head.repo.fork && github.event.action != 'labeled' || github.event.label.name == 'approved-for-ci' }} + runs-on: ubuntu-latest + defaults: + run: + shell: bash -l {0} + + steps: + - uses: actions/checkout@v3 + with: + ref: ${{ github.event.pull_request.head.sha || github.ref }} + - name: Install Conda environment with Micromamba + uses: mamba-org/provision-with-micromamba@v12 + with: + environment-file: environment.yml + environment-name: DEVELOP + channels: conda-forge + cache-env: true + cache-env-key: ubuntu-latest-3.10 + extra-specs: | + python=3.10 + - name: Install package + run: | + python -m pip install --no-deps -e . + - name: Build documentation + run: | + make docs-build + + +# on: push + +# jobs: +# unit-tests: +# runs-on: ${{ matrix.os }}-latest +# strategy: +# max-parallel: 5 +# matrix: +# os: [ubuntu] +# python: ["3.7", "3.8", "3.9", "3.10"] +# extras: [''] +# include: +# - os: macos +# python: "3.8" +# - os: ubuntu +# python: "3.8" +# extras: -minimal +# # python-eccodes and cffi both fail +# # - os: windows +# # python: 3.8 + +# steps: +# - uses: actions/checkout@v2 +# - uses: mamba-org/setup-micromamba@v1 +# with: +# micromamba-version: '1.4.3-0' +# environment-name: ${{ matrix.os }}-${{ matrix.python }}${{ matrix.extras }} +# environment-file: tests/environment-${{ matrix.os }}-${{ matrix.python }}${{ matrix.extras }}.yml +# create-args: python=${{ matrix.python }} +# - name: Test with pytest +# shell: bash -l {0} +# run: | +# micromamba install pytest pytest-cov tomli +# pip install --no-deps -e . +# pytest -v --cov=. --cov-report=xml -k 'not test_notebooks' . +# - name: Upload coverage to Codecov +# uses: codecov/codecov-action@v1 +# - name: Install test tools for notebooks +# shell: bash -l {0} +# run: | +# micromamba install pytest nbformat nbconvert ipykernel +# pytest -v -k 'test_notebooks' . 
+ +# docs: +# runs-on: ubuntu-latest + +# steps: +# - uses: actions/checkout@v2 +# - uses: mamba-org/setup-micromamba@v1 +# with: +# micromamba-version: '1.4.3-0' +# environment-name: ubuntu-3.8 +# environment-file: tests/environment-ubuntu-3.8.yml +# create-args: python=${{ matrix.python }} +# - name: Build documentation with Sphinx +# shell: bash -l {0} +# run: | +# micromamba install sphinx +# micromamba install sphinx_rtd_theme -c conda-forge +# micromamba install ipykernel +# micromamba install pandoc +# micromamba install nbsphinx +# micromamba install ipython_genutils +# micromamba install jinja2=3.0.3 +# pip install --no-deps -e . +# python setup.py build_sphinx +# # - name: Test README with pytest +# # shell: bash -l {0} +# # run: | +# # conda install pytest pytest-cov tomli +# # pip install --no-deps -e . +# # pytest -v --cov=. --cov-report=xml README.rst + +# code-quality: +# runs-on: ubuntu-latest + +# steps: +# - uses: actions/checkout@v2 +# - uses: mamba-org/setup-micromamba@v1 +# with: +# micromamba-version: '1.4.3-0' +# environment-name: ubuntu-3.8 +# environment-file: tests/environment-ubuntu-3.8.yml +# create-args: python=3.8 +# - name: Lint with flake8 +# shell: bash -l {0} +# run: | +# micromamba install flake8 +# flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics +# flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics +# - name: Type check with mypy +# shell: bash -l {0} +# run: | +# micromamba install mypy pytest +# mypy --strict . + +# code-style: +# runs-on: ubuntu-latest + +# steps: +# - uses: actions/checkout@v2 +# - uses: mamba-org/setup-micromamba@v1 +# with: +# micromamba-version: '1.4.3-0' +# environment-name: ubuntu-3.8 +# environment-file: tests/environment-ubuntu-3.8.yml +# create-args: python=3.8 +# - name: Check code style with black +# shell: bash -l {0} +# run: | +# micromamba install black +# black --check . +# - name: Check code style with isort +# shell: bash -l {0} +# run: | +# micromamba install isort +# isort --check . + +# deploy: +# runs-on: ubuntu-latest + +# steps: +# - uses: actions/checkout@v2 +# - uses: mamba-org/setup-micromamba@v1 +# with: +# micromamba-version: '1.4.3-0' +# environment-name: ubuntu-3.8 +# environment-file: tests/environment-ubuntu-3.8.yml +# create-args: python=3.8 +# - name: Check MANIFEST.in +# shell: bash -l {0} +# run: | +# micromamba install -c conda-forge check-manifest +# check-manifest . 
+# - name: Build distributions +# shell: bash -l {0} +# run: | +# micromamba install pip setuptools wheel +# python setup.py sdist bdist_wheel diff --git a/.github/workflows/on-push.yml b/.github/workflows/on-push.yml deleted file mode 100644 index 594b9e5..0000000 --- a/.github/workflows/on-push.yml +++ /dev/null @@ -1,141 +0,0 @@ -name: on-push - -on: [push, pull_request] - -jobs: - unit-tests: - runs-on: ${{ matrix.os }}-latest - strategy: - max-parallel: 5 - matrix: - os: [ubuntu] - python: ["3.7", "3.8", "3.9", "3.10"] - extras: [''] - include: - - os: macos - python: "3.8" - - os: ubuntu - python: "3.8" - extras: -minimal -# python-eccodes and cffi both fail -# - os: windows -# python: 3.8 - - steps: - - uses: actions/checkout@v2 - - uses: mamba-org/setup-micromamba@v1 - with: - micromamba-version: '1.4.3-0' - environment-name: ${{ matrix.os }}-${{ matrix.python }}${{ matrix.extras }} - environment-file: tests/environment-${{ matrix.os }}-${{ matrix.python }}${{ matrix.extras }}.yml - create-args: python=${{ matrix.python }} - - name: Test with pytest - shell: bash -l {0} - run: | - micromamba install pytest pytest-cov tomli - pip install --no-deps -e . - pytest -v --cov=. --cov-report=xml -k 'not test_notebooks' . - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v1 - - name: Install test tools for notebooks - shell: bash -l {0} - run: | - micromamba install pytest nbformat nbconvert ipykernel - pytest -v -k 'test_notebooks' . - - docs: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - uses: mamba-org/setup-micromamba@v1 - with: - micromamba-version: '1.4.3-0' - environment-name: ubuntu-3.8 - environment-file: tests/environment-ubuntu-3.8.yml - create-args: python=${{ matrix.python }} - - name: Build documentation with Sphinx - shell: bash -l {0} - run: | - micromamba install sphinx - micromamba install sphinx_rtd_theme -c conda-forge - micromamba install ipykernel - micromamba install pandoc - micromamba install nbsphinx - micromamba install ipython_genutils - micromamba install jinja2=3.0.3 - pip install --no-deps -e . - python setup.py build_sphinx - # - name: Test README with pytest - # shell: bash -l {0} - # run: | - # conda install pytest pytest-cov tomli - # pip install --no-deps -e . - # pytest -v --cov=. --cov-report=xml README.rst - - code-quality: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - uses: mamba-org/setup-micromamba@v1 - with: - micromamba-version: '1.4.3-0' - environment-name: ubuntu-3.8 - environment-file: tests/environment-ubuntu-3.8.yml - create-args: python=3.8 - - name: Lint with flake8 - shell: bash -l {0} - run: | - micromamba install flake8 - flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics - flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - - name: Type check with mypy - shell: bash -l {0} - run: | - micromamba install mypy pytest - mypy --strict . - - code-style: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - uses: mamba-org/setup-micromamba@v1 - with: - micromamba-version: '1.4.3-0' - environment-name: ubuntu-3.8 - environment-file: tests/environment-ubuntu-3.8.yml - create-args: python=3.8 - - name: Check code style with black - shell: bash -l {0} - run: | - micromamba install black - black --check . - - name: Check code style with isort - shell: bash -l {0} - run: | - micromamba install isort - isort --check . 
- - deploy: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - uses: mamba-org/setup-micromamba@v1 - with: - micromamba-version: '1.4.3-0' - environment-name: ubuntu-3.8 - environment-file: tests/environment-ubuntu-3.8.yml - create-args: python=3.8 - - name: Check MANIFEST.in - shell: bash -l {0} - run: | - micromamba install -c conda-forge check-manifest - check-manifest . - - name: Build distributions - shell: bash -l {0} - run: | - micromamba install pip setuptools wheel - python setup.py sdist bdist_wheel diff --git a/.github/workflows/weekly.yml b/.github/workflows/weekly.yml deleted file mode 100644 index 20321c0..0000000 --- a/.github/workflows/weekly.yml +++ /dev/null @@ -1,44 +0,0 @@ -name: weekly - -on: - schedule: - - cron: '0 9 * * 1' - workflow_dispatch: - -jobs: - environment: - runs-on: ${{ matrix.os }}-latest - strategy: - max-parallel: 5 - fail-fast: false - matrix: - os: [ubuntu] - python: ["3.7", "3.8", "3.9", "3.10"] - extras: [''] - include: - - os: macos - python: "3.8" - - os: windows - python: "3.8" - - os: ubuntu - python: "3.8" - extras: -minimal - - steps: - - uses: actions/checkout@v2 - - uses: conda-incubator/setup-miniconda@v2 - with: - auto-update-conda: true - python-version: ${{ matrix.python }} - activate-environment: ${{ matrix.os }}-${{ matrix.python }}${{ matrix.extras }} - environment-file: environment${{ matrix.extras }}.in.yml - - name: Export concrete dependencies - shell: bash -l {0} - run: | - conda env export --no-build -f tests/environment-${{ matrix.os }}-${{ matrix.python }}${{ matrix.extras }}.yml - git diff - - name: Archive environment-${{ matrix.os }}-${{ matrix.python }}${{ matrix.extras }}.yml - uses: actions/upload-artifact@v2 - with: - name: environment-${{ matrix.os }}-${{ matrix.python }}${{ matrix.extras }}.yml - path: tests/environment-${{ matrix.os }}-${{ matrix.python }}${{ matrix.extras }}.yml diff --git a/.gitignore b/.gitignore index 16c6d6f..a0cfe38 100644 --- a/.gitignore +++ b/.gitignore @@ -88,8 +88,8 @@ _dev *.sublime-workspace .vscode -# mac +# mac .DS_Store # docs -docs/examples/*.bufr \ No newline at end of file +docs/examples/*.bufr diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..a135805 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,53 @@ +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-json + - id: check-yaml + - id: check-toml + # - id: check-added-large-files + - id: debug-statements + - id: mixed-line-ending +- repo: https://github.com/PyCQA/isort + rev: 5.12.0 + hooks: + - id: isort +- repo: https://github.com/psf/black + rev: 23.9.1 + hooks: + - id: black +- repo: https://github.com/keewis/blackdoc + rev: v0.3.8 + hooks: + - id: blackdoc + additional_dependencies: [black==23.3.0] +- repo: https://github.com/PyCQA/flake8 + rev: 6.1.0 + hooks: + - id: flake8 +- repo: https://github.com/executablebooks/mdformat + rev: 0.7.14 + hooks: + - id: mdformat + exclude: cruft-update-template.md +- repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks + rev: v2.11.0 + hooks: + - id: pretty-format-yaml + args: [--autofix, --preserve-quotes] + - id: pretty-format-toml + args: [--autofix] +- repo: https://github.com/PyCQA/pydocstyle.git + rev: 6.1.1 + hooks: + - id: pydocstyle + additional_dependencies: [toml] + exclude: tests|docs +# - repo: https://github.com/pre-commit/mirrors-mypy +# rev: v1.10.0 +# hooks: +# - id: mypy +# 
types: [python] +# args: [--strict] diff --git a/MANIFEST.in b/MANIFEST.in index 32f42de..5783562 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -10,4 +10,3 @@ recursive-include tests *.yml recursive-include tests *.csv recursive-include docs * recursive-include tests *.txt - diff --git a/Makefile b/Makefile index fb45093..3009203 100644 --- a/Makefile +++ b/Makefile @@ -1,40 +1,63 @@ environment := PDBUFR +setup: + pre-commit install + default: @echo No default -code-quality: - flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics - mypy --strict . - -code-style: - black . - isort . +qa: + pre-commit run --all-files +# mypy --strict . -tests: +unit-tests: pytest -v --cov=. --cov-report=html README.rst tests -deploy: - check-manifest . - python setup.py sdist bdist_wheel +conda-env-update: + $(CONDA) env update $(CONDAFLAGS) -f environment.yml + +docker-build: + docker build -t $(PROJECT) . + +docker-run: + docker run --rm -ti -v $(PWD):/srv $(PROJECT) + +template-update: + pre-commit run --all-files cruft -c .pre-commit-config-weekly.yaml + +docs-build: + cd docs && rm -fr _api && make clean && make html + + + +# tests: +# pytest -v --cov=. --cov-report=html README.rst tests + +# deploy: +# check-manifest . +# python setup.py sdist bdist_wheel + +# env-create: +# conda env create -n $(environment) -f environment.in.yml +# conda install -n $(environment) -y pytest pytest-cov black flake8 mypy isort wheel +# conda install -n $(environment) -c conda-forge -y check-manifest + +# env-update: +# conda env update -n $(environment) -f environment.in.yml -env-create: - conda env create -n $(environment) -f environment.in.yml - conda install -n $(environment) -y pytest pytest-cov black flake8 mypy isort wheel - conda install -n $(environment) -c conda-forge -y check-manifest +# testclean: +# $(RM) -r */__pycache__ .coverage .cache tests/.ipynb_checkpoints *.lprof -env-update: - conda env update -n $(environment) -f environment.in.yml +# clean: testclean +# $(RM) -r */*.pyc htmlcov dist build .eggs -testclean: - $(RM) -r */__pycache__ .coverage .cache tests/.ipynb_checkpoints *.lprof +# distclean: clean +# $(RM) -r *.egg-info -clean: testclean - $(RM) -r */*.pyc htmlcov dist build .eggs -distclean: clean - $(RM) -r *.egg-info +# docs-build: +# cd docs && rm -fr _api && make clean && make html -.PHONY: code-quality code-style tests env-create env-update +# .PHONY: code-quality code-style tests env-create env-update diff --git a/docs/_static/flat_dump_output.txt b/docs/_static/flat_dump_output.txt index aad2003..0ae398f 100644 --- a/docs/_static/flat_dump_output.txt +++ b/docs/_static/flat_dump_output.txt @@ -2,4 +2,4 @@ 0 1 71 907 NaN NaN NaN NaN 1 1 71 823 221.5 191.5 NaN NaN -[2 rows x 197 columns] \ No newline at end of file +[2 rows x 197 columns] diff --git a/docs/_static/h_dump_output.txt b/docs/_static/h_dump_output.txt index 83ee73f..1b4c9a8 100644 --- a/docs/_static/h_dump_output.txt +++ b/docs/_static/h_dump_output.txt @@ -7,4 +7,4 @@ 47 53.75 -73.67 23200.0 223.1 48 53.75 -73.67 20500.0 221.5 -[48 rows x 4 columns] \ No newline at end of file +[48 rows x 4 columns] diff --git a/docs/examples.rst b/docs/examples.rst index f77d0dd..d5e9201 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -16,4 +16,3 @@ Here is a list of example notebooks to illustrate how to extract BUFR data using examples/synop examples/tropical_cyclone examples/flat_dump - diff --git a/docs/index.rst b/docs/index.rst index a287a8f..c363af2 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -16,7 
+16,7 @@ Welcome to pdbufr's documentation :caption: Examples :titlesonly: - examples + examples .. toctree:: :maxdepth: 1 @@ -38,6 +38,6 @@ Indices and tables ================== * :ref:`genindex` - + .. * :ref:`modindex` .. * :ref:`search` diff --git a/docs/licence.rst b/docs/licence.rst index 0834bc8..f4efb6a 100644 --- a/docs/licence.rst +++ b/docs/licence.rst @@ -10,4 +10,4 @@ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and -limitations under the License. \ No newline at end of file +limitations under the License. diff --git a/docs/read_bufr.rst b/docs/read_bufr.rst index 88b9bd0..f55807f 100644 --- a/docs/read_bufr.rst +++ b/docs/read_bufr.rst @@ -4,11 +4,11 @@ read_bufr .. py:function:: read_bufr(path, columns=[], filters={}, required_columns=True, flat=False) Extract data from BUFR as a pandas.DataFrame with the specified ``columns`` applying the ``filters`` either in :ref:`hierarchical ` or :ref:`flat ` mode. - + :param path: path to the BUFR file or a :ref:`message_list_object` :type path: str, bytes, os.PathLike or a :ref:`message_list_object` - :param columns: a list of ecCodes BUFR keys to extract for each BUFR message/subset. When ``flat`` is True ``columns`` must be one of the following string values: - + :param columns: a list of ecCodes BUFR keys to extract for each BUFR message/subset. When ``flat`` is True ``columns`` must be one of the following string values: + * "all", empty str or empty list (default): all the columns are extracted * "header": only the columns from the header section are extracted * "data": only the columns from the data section are extracted @@ -19,12 +19,12 @@ read_bufr :param required_columns: the list of ecCodes BUFR keys that are required to be present in the BUFR message/subset. Bool values are interpreted as follows: * if ``flat`` is False: - + * True means all the keys in ``columns`` are required * False means no columns are required * if ``flat`` is True either bool value means no columns are required - + :type required_columns: bool, iterable[str] :param flat: enables flat extraction mode. When it is ``True`` each message/subset is treated as a :ref:`flat list `, while when it is ``False`` (default), data is extracted as if the message had a :ref:`tree-like hierarchy `. See details below. New in *version 0.10.0* :type flat: bool @@ -33,31 +33,31 @@ read_bufr In order to correctly use :func:`read_bufr` for a given BUFR file first you need to understand the structure of the messages and the keys/values you can use for data extraction and filter definition. The BUFR structure can be explored with *ecCodes* command line tools `bufr_ls `_ and `bufr_dump `_. You can also use `CodesUI `_ or `Metview `_, which provide graphical user interfaces to inspect BUFR/GRIB data. - There are some :ref:`notebook examples ` available demonstrating how to use :func:`read_bufr` for various observation/forecast BUFR data types. + There are some :ref:`notebook examples ` available demonstrating how to use :func:`read_bufr` for various observation/forecast BUFR data types. -BUFR keys +BUFR keys ----------- ecCodes keys from both the BUFR header and data sections are supported in ``columns``, ``filters`` and ``required_columns``. However, there are some limitations: - + * keys containing the rank e.g. 
"#1#latitude" cannot be used * key attributes e.g. "latitude->code" cannot be used - + The "count" generated key, which refers to the message index, is also supported but please note that message indexing starts at 1 and not at 0! - + There is also a set of **computed keys** that can be used for :func:`read_bufr`: * "data_datetime" (datetime.datetime): generated from the "year", "month", "day", "hour", "minute", "second" keys in the BUFR data section. * "typical_datetime" (datetime.datetime): generated from the "typicalYear", "typicalMonth", "typicalDay", "typicalHour", "typicalMinute", "typicalSecond" keys in the BUFR header section. - * "WMO_station_id": generated from the "blockNumber" and "stationNumber" keys as:: - + * "WMO_station_id": generated from the "blockNumber" and "stationNumber" keys as:: + blockNumber*1000+stationNumber * "geometry": values extracted as a list of:: - + [longitude,latitude,heightOfStationGroundAboveMeanSeaLevel] - + as required for geopandas. * "CRS": generated from the "coordinateReferenceSystem" key using the following mapping: @@ -78,7 +78,7 @@ BUFR keys * - 3 - EPSG:4314 - + * - 4 or 5 - not supported @@ -102,35 +102,35 @@ Single value A filter condition can be a single value match: - .. code-block:: python + .. code-block:: python - filters={"blockNumber": 12} + filters = {"blockNumber": 12} List of values ++++++++++++++ - A list of values specifies an "in" relation: + A list of values specifies an "in" relation: + + .. code-block:: python + + filters = {"stationNumber": [843, 925]} + filters = {"blockNumber": range(10, 13)} - .. code-block:: python - - filters={"stationNumber": [843, 925]} - filters={"blockNumber": range(10, 13)} - Slices ++++++++ - + Intervals can be expressed as a ``slice`` (the boundaries as inclusive): .. code-block:: python - - # closed interval (>=273.16 and <=293.16) - filters={"airTemperature": slice(273.16, 293.16)} - # open interval (<=273.16) - filters={"airTemperature": slice(None, 273.16)} + # closed interval (>=273.16 and <=293.16) + filters = {"airTemperature": slice(273.16, 293.16)} + + # open interval (<=273.16) + filters = {"airTemperature": slice(None, 273.16)} - # open interval (>=273.16) - filters={"airTemperature": slice(273.16, None)} + # open interval (>=273.16) + filters = {"airTemperature": slice(273.16, None)} Callables @@ -139,72 +139,79 @@ Callables We can even use a ``callable`` condition. This example uses a lambda expression to filter values in a certain range: .. code-block:: python - - filters={"airTemperature": lambda x: x > 250 and x <= 300} - + filters = {"airTemperature": lambda x: x > 250 and x <= 300} + + The same task can also be achieved by using a function: .. code-block:: python - - def filter_temp(t): - return t > 250 and t <= 300 - df = pdbufr.read_bufr("temp.bufr", - columns=("latitude", "longitude", "airTemperature"), - filters={"airTemperature": filter_temp}, - ) + def filter_temp(t): + return t > 250 and t <= 300 + + + df = pdbufr.read_bufr( + "temp.bufr", + columns=("latitude", "longitude", "airTemperature"), + filters={"airTemperature": filter_temp}, + ) Combining conditions +++++++++++++++++++++ When multiple conditions are specified they are connected with a logical AND: - + .. code-block:: python - - filters={"blockNumber": 12, - "stationNumber": [843, 925], - "airTemperature": slice(273.16, 293.16)} + + filters = { + "blockNumber": 12, + "stationNumber": [843, 925], + "airTemperature": slice(273.16, 293.16), + } A ``geographical filter`` can be defined like this: .. 
code-block:: python - - # locations in the 40W,10S - 30E,20N area - filters={"latitude": slice(-10, 20), - "longitude": slice(-40, 30)} + + # locations in the 40W,10S - 30E,20N area + filters = {"latitude": slice(-10, 20), "longitude": slice(-40, 30)} while the following expression can be used as a ``temporal filter``: .. code-block:: python - - filters={"data_datetime": - slice(datetime.datetime(2009,1,23,13,0), - datetime.datetime(2009,1,23,13,1))} - + filters = { + "data_datetime": slice( + datetime.datetime(2009, 1, 23, 13, 0), + datetime.datetime(2009, 1, 23, 13, 1), + ) + } + + .. _tree-mode-section: Hierarchical mode ------------------- - + When ``flat`` is ``False`` the contents of a BUFR message/subset is interpreted as a hierarchical structure. This is based on a certain group of BUFR keys (related to instrumentation, location etc), which according to the `WMO BUFR manual `_ introduce a new hierarchy level in the message/susbset. During data extraction ``read_bufr`` traverses this hierarchy and when all the ``columns`` are collected and the all the ``filters`` match a new record is added to the output. With this several records can be extracted from the same message/subset. **Example** - + In this example we extract values from a classic radiosonde observation BUFR file. Here each message contains a single location ("latitude", "longitude") with several pressure levels of temperature, dewpoint etc. The message hierarchy is shown in the following snapshot: .. image:: /_static/temp_structure.png - :width: 450px + :width: 450px To extract the temperature profile for the first two stations we can use this code: .. code-block:: python - df = pdbufr.read_bufr("temp.bufr", - columns=("latitude", "longitude", "pressure", "airTemperature"), - filters={"count": [1, 2]}, + df = pdbufr.read_bufr( + "temp.bufr", + columns=("latitude", "longitude", "pressure", "airTemperature"), + filters={"count": [1, 2]}, ) which results in the following DataFrame: @@ -220,9 +227,9 @@ Flat mode New in *version 0.10.0* When ``flat`` is ``True`` messages/subsets are extracted as a whole preserving the column order (see the note below for exceptions) and each extracted message/subset will be a separate record in the resulting DataFrame. - + With ``filters`` we can control which messages/subsets should be selected. By default, all the columns in a message/subset are extracted (see the exceptions below), but this can be changed by setting ``columns`` to "header" or "data" to get only the header or data section keys. Other column selection modes are not available. - + In the resulting DataFrame the original ecCodes keys containing the **rank** are used as column names, e.g. "#1#latitude" instead of "latitude". The following set of keys are omitted: * from the header: "unexpandedDescriptors" @@ -240,7 +247,7 @@ Flat mode and there is e.g. a value "#12#pressure" = 50000 in the message/subset then the filter matches. * for **computed keys** the filter condition matches if there is a match for the involved keys at their first occurrence (e.i. rank=1) in the message/subset. E.g:: - + filters = {"WMO_station_id": 12925} matches if "#1#blockNumber" = 12 and "#1#stationNumber" = 925 in the message/subset (remember WMO_station_id=blockNumber*1000+stationNumber) @@ -249,17 +256,19 @@ Flat mode Messages/subsets in a BUFR file can have a different set of BUFR keys. 
When a new message/subset is processed :func:`read_bufr` adds it to the resulting DataFrame as a new record and columns that are not yet present in the output are automatically appended by Pandas to the end changing the original order of keys for that message. When this happens :func:`pdbufr` prints a warning message to the stdout (see the example below or the :ref:`/examples/flat_dump.ipynb` notebook for details). - + **Example** We use the same radiosonde BUFR file as for the :ref:`hierarchical mode ` example above. To extract all the data values for the first two stations we can use this code: .. code-block:: python - - df = pdbufr.read_bufr("temp.bufr", columns="data", - flat=True - filters={"count": [1, 2]}, - ) + + df = pdbufr.read_bufr( + "temp.bufr", + columns="data", + flat=True, + filters={"count": [1, 2]}, + ) which results in the following DataFrame: @@ -268,21 +277,24 @@ Flat mode and generates the following warning:: Warning: not all BUFR messages/subsets have the same structure in the input file. - Non-overlapping columns (starting with column[189] = #1#generatingApplication) + Non-overlapping columns (starting with column[189] = #1#generatingApplication) were added to end of the resulting dataframe altering the original column order for these messages. This warning can be disabled by using the **warnings** module. The code below produces the same DataFrame as the one above but does not print the warning message: .. code-block:: python - - import warnings - warnings.filterwarnings("ignore", module="pdbufr") - - df = pdbufr.read_bufr("temp.bufr", columns="data", - flat=True - filters={"count": [1, 2]}, - ) + + import warnings + + warnings.filterwarnings("ignore", module="pdbufr") + + df = pdbufr.read_bufr( + "temp.bufr", + columns="data", + flat=True, + filters={"count": [1, 2]}, + ) .. note:: diff --git a/docs/release_notes/index.rst b/docs/release_notes/index.rst index 781b397..7fa42d6 100644 --- a/docs/release_notes/index.rst +++ b/docs/release_notes/index.rst @@ -3,6 +3,6 @@ Release notes .. 
toctree:: :maxdepth: 1 - + version_0.11_updates version_0.10_updates diff --git a/docs/release_notes/version_0.11_updates.rst b/docs/release_notes/version_0.11_updates.rst index 8cad254..ea4ba07 100644 --- a/docs/release_notes/version_0.11_updates.rst +++ b/docs/release_notes/version_0.11_updates.rst @@ -8,4 +8,3 @@ Version 0.11.0 - added the ability to pass a :ref:`message list object ` to :func:`read_bufr` (`#61 `_) - use micromamba instead of conda to set up ci environments - diff --git a/environment-minimal.in.yml b/environment-minimal.in.yml deleted file mode 100644 index e50445f..0000000 --- a/environment-minimal.in.yml +++ /dev/null @@ -1,8 +0,0 @@ -channels: - - defaults - - conda-forge -dependencies: - - attrs - - eccodes=>2.19.0 - - python-eccodes - - numpy diff --git a/environment.in.yml b/environment.in.yml deleted file mode 100644 index 2134f83..0000000 --- a/environment.in.yml +++ /dev/null @@ -1,10 +0,0 @@ -channels: - - defaults - - conda-forge -dependencies: - - attrs - - eccodes=>2.19.0 - - python-eccodes - - nomkl - - numpy - - pandas diff --git a/environment.yml b/environment.yml index 0628f81..a318db8 100644 --- a/environment.yml +++ b/environment.yml @@ -1,8 +1,23 @@ +name: pdbufr channels: - - defaults - - conda-forge +- conda-forge +- nodefaults dependencies: - - eccodes - - pip - - pip: - - pdbufr +- attrs +- pip +- numpy +- pandas +- make +- mypy +- myst-parser +- pre-commit +- pydata-sphinx-theme +- pytest +- pytest-cov +- sphinx +- sphinx_rtd_theme +- sphinxcontrib-apidoc +- nbformat +- nbconvert +- nbsphinx +- ipykernel diff --git a/mypy.ini b/mypy.ini index 8258fe0..559c3e2 100644 --- a/mypy.ini +++ b/mypy.ini @@ -1,3 +1,3 @@ [mypy] [mypy-pdbufr.high_level_bufr.*] -ignore_errors = True \ No newline at end of file +ignore_errors = True diff --git a/pdbufr/bufr_filters.py b/pdbufr/bufr_filters.py index 872f2b0..80d476e 100644 --- a/pdbufr/bufr_filters.py +++ b/pdbufr/bufr_filters.py @@ -9,7 +9,7 @@ import logging import typing as T -import attr +import attr # type: ignore LOG = logging.getLogger(__name__) diff --git a/pdbufr/bufr_read.py b/pdbufr/bufr_read.py index faa140f..d72c955 100644 --- a/pdbufr/bufr_read.py +++ b/pdbufr/bufr_read.py @@ -27,7 +27,6 @@ def read_bufr( """ Read selected observations from a BUFR file into DataFrame. """ - if isinstance(path_or_messages, (str, bytes, os.PathLike)): with BufrFile(path_or_messages) as bufr_file: # type: ignore return _read_bufr( @@ -88,7 +87,12 @@ def __init__(self) -> None: ori_formatwarning = warnings.formatwarning warnings.formatwarning = lambda msg, *args, **kwargs: f"Warning: {msg}\n" warnings.warn( - f"not all BUFR messages/subsets have the same structure in the input file. Non-overlapping columns (starting with column[{column_info.first_count-1}] = {df.columns[column_info.first_count-1]}) were added to end of the resulting dataframe altering the original column order for these messages." + ( + "not all BUFR messages/subsets have the same structure in the input file. " + "Non-overlapping columns (starting with column[{column_info.first_count-1}] =" + f"{df.columns[column_info.first_count-1]}) were added to end of the resulting dataframe" + "altering the original column order for these messages." 
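+                # Note (editorial): the "Non-overlapping columns ..." literal above is a
+                # plain string, so "{column_info.first_count-1}" is emitted verbatim rather
+                # than interpolated, and the adjacent literals concatenate without separating
+                # spaces ("=<column name>" and "dataframealtering"). Adding an f prefix and
+                # trailing spaces to those literals would restore the warning text shown in
+                # docs/read_bufr.rst.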
+ ) ) warnings.formatwarning = ori_formatwarning diff --git a/pdbufr/bufr_structure.py b/pdbufr/bufr_structure.py index eb3210a..eab8c96 100644 --- a/pdbufr/bufr_structure.py +++ b/pdbufr/bufr_structure.py @@ -10,9 +10,9 @@ import datetime import typing as T -import attr +import attr # type: ignore import eccodes # type: ignore -import numpy as np +import numpy as np # type: ignore from pdbufr.high_level_bufr.bufr import bufr_code_is_coord @@ -65,7 +65,7 @@ def from_key(cls, key: str) -> "UncompressedBufrKey": rank = int(rank_text[1:]) else: rank = 0 - except: + except Exception: rank = 0 return cls(rank, 0, name) @@ -91,7 +91,8 @@ def relative_key(self) -> str: class MessageWrapper: """Makes it possible to use context manager and is_coord method for all - types of messages.""" + types of messages. + """ WRAP: T.Dict[T.Any, T.Any] = {} @@ -174,7 +175,7 @@ def check(self, key: str, name: str) -> bool: c = self.message.is_coord(key) self.cache[name] = c return c - except: + except Exception: return False return c @@ -653,7 +654,6 @@ def stream_bufr( ``True`` means all ``columns`` are required (default ``True``) :param prefilter_headers: filter the header keys before unpacking the data section (default ``False``) """ - if isinstance(columns, str): columns = (columns,) @@ -742,10 +742,10 @@ def stream_bufr_flat( elif len(columns) == 0 or columns[0] == "": columns = ("all",) elif len(columns) != 1: - raise ValueError(f"when columns is an iterable it can have maximum 1 element") + raise ValueError("when columns is an iterable it can have maximum 1 element") if columns[0] not in ["all", "header", "data"]: - raise ValueError(f"columns must be all, header or data") + raise ValueError("columns must be all, header or data") add_header = columns[0] in ["all", "header"] add_data = columns[0] in ["all", "data"] diff --git a/pdbufr/high_level_bufr/bufr.py b/pdbufr/high_level_bufr/bufr.py index 4145216..cf86215 100644 --- a/pdbufr/high_level_bufr/bufr.py +++ b/pdbufr/high_level_bufr/bufr.py @@ -20,7 +20,7 @@ def bufr_code_is_coord(code) -> bool: try: return code <= 9999 - except: + except Exception: return int(code[:3]) < 10 @@ -102,9 +102,9 @@ def is_coord(self, key): c = self._get(key + "->code", int) try: return bufr_code_is_coord(c) - except: + except Exception: return False - except: + except Exception: return False diff --git a/pdbufr/high_level_bufr/codesfile.py b/pdbufr/high_level_bufr/codesfile.py index e3d9d95..925d7ae 100644 --- a/pdbufr/high_level_bufr/codesfile.py +++ b/pdbufr/high_level_bufr/codesfile.py @@ -14,7 +14,6 @@ class CodesFile(io.FileIO): - """ An abstract class to specify and/or implement common behaviour that files read by ecCodes should implement. @@ -24,15 +23,16 @@ class CodesFile(io.FileIO): Individual messages can be accessed using the ``next`` method. Of course, it is also possible to iterate over each message in the file:: - >>> with {classname}(filename) as {alias}: + >>> with {classname}(filename) as alias: ... # Print number of messages in file - ... len({alias}) + ... len(alias) ... # Open all messages in file - ... for msg in {alias}: + ... for msg in alias: ... print(msg[key_name]) - ... len({alias}.open_messages) + ... len(alias.open_messages) + ... 
>>> # When the file is closed, any open messages are closed - >>> len({alias}.open_messages) + >>> len(alias.open_messages) """ #: Type of messages belonging to this file diff --git a/pdbufr/high_level_bufr/codesmessage.py b/pdbufr/high_level_bufr/codesmessage.py index 9070b63..3f48f2e 100644 --- a/pdbufr/high_level_bufr/codesmessage.py +++ b/pdbufr/high_level_bufr/codesmessage.py @@ -25,7 +25,6 @@ def raise_keyerror(key): class CodesMessage(object): - """ An abstract class to specify and/or implement common behaviour that messages read by ecCodes should implement. @@ -47,9 +46,9 @@ class CodesMessage(object): Usage:: - >>> with {parent}(filename) as {alias}: + >>> with {parent}(filename) as alias: ... # Access a key from each message - ... for msg in {alias}: + ... for msg in alias: ... print(msg[key_name]) ... # Report number of keys in message ... len(msg) @@ -71,6 +70,7 @@ class CodesMessage(object): ... msg2 = {classname}(clone=msg) ... # If desired, messages can be closed manually or used in with ... msg.close() + ... """ #: ecCodes enum-like PRODUCT constant diff --git a/pyproject.toml b/pyproject.toml index 69dea0c..b184c58 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,13 +1,12 @@ -[tool.isort] -profile = "black" - -[tool.pytest.ini_options] -norecursedirs = [ - "build", - "docs", - ".tox", -] +[build-system] +requires = ["setuptools>=45", "setuptools_scm[toml]>=6.2"] [tool.coverage.run] branch = true -omit = ["setup.py"] + +[tool.isort] +profile = "black" + +[tool.pydocstyle] +add_ignore = ["D1", "D200", "D205", "D400", "D401", "D403"] +convention = "numpy" diff --git a/setup.cfg b/setup.cfg index cbe90fc..4b61401 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,7 @@ [zest.releaser] python-file-with-version = pdbufr/__init__.py + [flake8] -max-line-length = 88 +max-line-length = 110 +extend-ignore = E203, W503 diff --git a/setup.py b/setup.py index de5bee7..7f96c44 100644 --- a/setup.py +++ b/setup.py @@ -40,7 +40,7 @@ def parse_version_from(path: str) -> str: url="https://github.com/ecmwf/pdbufr", packages=setuptools.find_packages(), include_package_data=True, - install_requires=["attrs", "eccodes", "pandas", "hypothesis"], + install_requires=["attrs", "eccodes", "pandas"], extras_require={"tests": ["flake8", "pytest", "pytest-cov", "requests"]}, zip_safe=True, keywords="eccodes bufr pandas", diff --git a/tests/downstream-ci-requirements.txt b/tests/downstream-ci-requirements.txt index 663bd1f..2f30380 100644 --- a/tests/downstream-ci-requirements.txt +++ b/tests/downstream-ci-requirements.txt @@ -1 +1,7 @@ -requests \ No newline at end of file +requests +# for testing +pytest +pytest-cov +nbformat +nbconvert +ipykernel diff --git a/tests/environment-macos-3.8.yml b/tests/environment-macos-3.8.yml deleted file mode 100644 index c292b3d..0000000 --- a/tests/environment-macos-3.8.yml +++ /dev/null @@ -1,13 +0,0 @@ -name: macos-3.8 -channels: - - defaults - - conda-forge -dependencies: - - attrs - - eccodes=>2.19.0 - - python-eccodes - - nomkl - - numpy - - pandas - - requests -prefix: /usr/local/miniconda/envs/macos-3.8 diff --git a/tests/environment-ubuntu-3.10.yml b/tests/environment-ubuntu-3.10.yml deleted file mode 100644 index ec7250a..0000000 --- a/tests/environment-ubuntu-3.10.yml +++ /dev/null @@ -1,13 +0,0 @@ -name: ubuntu-3.10 -channels: - - defaults - - conda-forge -dependencies: - - attrs - - eccodes=>2.19.0 - - python-eccodes - - nomkl - - numpy - - pandas - - requests -prefix: /usr/share/miniconda/envs/ubuntu-3.10 diff --git 
a/tests/environment-ubuntu-3.6.yml b/tests/environment-ubuntu-3.6.yml deleted file mode 100644 index cadf3ce..0000000 --- a/tests/environment-ubuntu-3.6.yml +++ /dev/null @@ -1,13 +0,0 @@ -name: ubuntu-3.6 -channels: - - defaults - - conda-forge -dependencies: - - attrs - - eccodes=>2.19.0 - - python-eccodes - - nomkl - - numpy - - pandas - - requests -prefix: /usr/share/miniconda/envs/ubuntu-3.6 diff --git a/tests/environment-ubuntu-3.7.yml b/tests/environment-ubuntu-3.7.yml deleted file mode 100644 index 3194c54..0000000 --- a/tests/environment-ubuntu-3.7.yml +++ /dev/null @@ -1,13 +0,0 @@ -name: ubuntu-3.7 -channels: - - defaults - - conda-forge -dependencies: - - attrs - - eccodes=>2.19.0 - - python-eccodes - - nomkl - - numpy - - pandas - - requests -prefix: /usr/share/miniconda/envs/ubuntu-3.7 diff --git a/tests/environment-ubuntu-3.8-minimal.yml b/tests/environment-ubuntu-3.8-minimal.yml deleted file mode 100644 index 9f974a2..0000000 --- a/tests/environment-ubuntu-3.8-minimal.yml +++ /dev/null @@ -1,13 +0,0 @@ -name: ubuntu-3.8-minimal -channels: - - defaults - - conda-forge -dependencies: - - attrs - - eccodes=>2.19.0 - - python-eccodes - - nomkl - - numpy - - pandas - - requests -prefix: /usr/share/miniconda/envs/ubuntu-3.8-minimal diff --git a/tests/environment-ubuntu-3.8.yml b/tests/environment-ubuntu-3.8.yml deleted file mode 100644 index 925490a..0000000 --- a/tests/environment-ubuntu-3.8.yml +++ /dev/null @@ -1,13 +0,0 @@ -name: ubuntu-3.8 -channels: - - defaults - - conda-forge -dependencies: - - attrs - - eccodes=>2.19.0 - - python-eccodes - - nomkl - - numpy - - pandas - - requests -prefix: /usr/share/miniconda/envs/ubuntu-3.8 diff --git a/tests/environment-ubuntu-3.9.yml b/tests/environment-ubuntu-3.9.yml deleted file mode 100644 index 4c80fc7..0000000 --- a/tests/environment-ubuntu-3.9.yml +++ /dev/null @@ -1,13 +0,0 @@ -name: ubuntu-3.9 -channels: - - defaults - - conda-forge -dependencies: - - attrs - - eccodes=>2.19.0 - - python-eccodes - - nomkl - - numpy - - pandas - - requests -prefix: /usr/share/miniconda/envs/ubuntu-3.9 diff --git a/tests/environment-unit-tests.yml b/tests/environment-unit-tests.yml new file mode 100644 index 0000000..080713d --- /dev/null +++ b/tests/environment-unit-tests.yml @@ -0,0 +1,25 @@ +name: pdbufr +channels: +- conda-forge +- nodefaults +dependencies: +- attrs +- pip +- numpy +- pandas +- make +- mypy +- myst-parser +- pre-commit +- pydata-sphinx-theme +- pytest +- pytest-cov +- requests +- sphinx +- sphinx-autoapi +- sphinx_rtd_theme +- sphinxcontrib-apidoc +- nbformat +- nbconvert +- nbsphinx +- ipykernel diff --git a/tests/environment-windows-3.8.yml b/tests/environment-windows-3.8.yml deleted file mode 100644 index c80d638..0000000 --- a/tests/environment-windows-3.8.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: windows-3.8 -channels: - - defaults - - conda-forge -dependencies: - - attrs=20.3.0 - - blas=1.0 - - bzip2=1.0.8 - - ca-certificates=2020.10.14 - - certifi=2020.6.20 - - cffi=1.14.3 - - curl=7.71.1 - - eccodes=2.19.1 - - hdf4=4.2.13 - - hdf5=1.10.6 - - intel-openmp=2020.2 - - jpeg=9d - - krb5=1.18.2 - - libcurl=7.71.1 - - libnetcdf=4.7.4 - - libpng=1.6.37 - - libssh2=1.9.0 - - libtiff=4.1.0 - - lz4-c=1.9.2 - - mkl=2020.2 - - mkl-service=2.3.0 - - mkl_fft=1.2.0 - - mkl_random=1.1.1 - - numpy=1.19.2 - - numpy-base=1.19.2 - - openjpeg=2.3.1 - - openssl=1.1.1h - - pandas=1.1.3 - - pip=20.2.4 - - pycparser=2.20 - - python=3.8.5 - - python-dateutil=2.8.1 - - python-eccodes=1.4.2 - - python_abi=3.8 - - pytz=2020.1 - - 
setuptools=50.3.1 - - six=1.15.0 - - sqlite=3.33.0 - - vc=14.1 - - vs2015_runtime=14.16.27012 - - wheel=0.35.1 - - wincertstore=0.2 - - xz=5.2.5 - - zlib=1.2.11 - - zstd=1.4.5 -prefix: C:\Miniconda\envs\windows-3.8 diff --git a/tests/test_40_sample_data.py b/tests/test_40_sample_data.py index 251ba2d..d34605d 100644 --- a/tests/test_40_sample_data.py +++ b/tests/test_40_sample_data.py @@ -738,7 +738,7 @@ def timestamp(s: str) -> T.Any: ts = pd.Timestamp(s) try: ts = ts.as_unit("ns") - except: + except Exception: pass return ts diff --git a/tests/test_60_flat_mode.py b/tests/test_60_flat_mode.py index 120d6fc..3b525b5 100644 --- a/tests/test_60_flat_mode.py +++ b/tests/test_60_flat_mode.py @@ -75,19 +75,19 @@ def test_read_flat_bufr_args() -> None: assert len(res.columns) == 103 assert len(res) == 50 - with pytest.raises(ValueError) as exc: + with pytest.raises(ValueError): res = pdbufr.read_bufr(TEST_DATA_1, "a", flat=True) - with pytest.raises(ValueError) as exc: + with pytest.raises(ValueError): res = pdbufr.read_bufr(TEST_DATA_1, ["a", "a"], flat=True) - with pytest.raises(TypeError) as exc_t: + with pytest.raises(TypeError): res = pdbufr.read_bufr(TEST_DATA_1, 3, flat=True) # type: ignore - with pytest.raises(ValueError) as exc: + with pytest.raises(ValueError): res = pdbufr.read_bufr(TEST_DATA_1, [3], flat=True) # type: ignore - with pytest.raises(ValueError) as exc: + with pytest.raises(ValueError): res = pdbufr.read_bufr(TEST_DATA_1, [3, 4], flat=True) # type: ignore diff --git a/tests/test_70_message_list.py b/tests/test_70_message_list.py index 0f8793c..bcf39a0 100644 --- a/tests/test_70_message_list.py +++ b/tests/test_70_message_list.py @@ -13,7 +13,7 @@ pd = pytest.importorskip("pandas") -from pdbufr import read_bufr +from pdbufr import read_bufr # noqa: E402 assert_frame_equal = pd.testing.assert_frame_equal
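
For reference, the snippet below ties together the ``read_bufr`` options documented in the updated ``docs/read_bufr.rst`` in this changeset: hierarchical extraction with combined filters, and flat extraction with the pdbufr column-order warning silenced. It is a minimal sketch only, not part of the patch; it assumes a local radiosonde file named ``temp.bufr`` (the placeholder file name used in the documentation examples), and the filter values are taken directly from those examples.

.. code-block:: python

    import datetime
    import warnings

    import pdbufr

    # Hierarchical mode: one record per matching location/level, combining the
    # documented filter forms (single value, list of values, slice, computed key).
    df = pdbufr.read_bufr(
        "temp.bufr",  # placeholder path, as in the documentation examples
        columns=("latitude", "longitude", "airTemperature", "data_datetime"),
        filters={
            "blockNumber": 12,
            "stationNumber": [843, 925],
            "airTemperature": slice(273.16, 293.16),
            "data_datetime": slice(
                datetime.datetime(2009, 1, 23, 13, 0),
                datetime.datetime(2009, 1, 23, 13, 1),
            ),
        },
    )

    # Flat mode (new in 0.10): each selected message/subset becomes one record.
    # Silence the "not all BUFR messages/subsets have the same structure" warning
    # via the warnings module, as described in the documentation.
    warnings.filterwarnings("ignore", module="pdbufr")
    df_flat = pdbufr.read_bufr(
        "temp.bufr",
        columns="data",
        flat=True,
        filters={"count": [1, 2]},
    )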