MNT: Clean up some lint found by ruff
dopplershift committed Nov 4, 2024
1 parent f13fc99 commit 20cd923
Showing 30 changed files with 117 additions and 141 deletions.
2 changes: 1 addition & 1 deletion ci/download_cartopy_maps.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
# Copyright (c) 2021 MetPy Developers.
# Copyright (c) 2021 Siphon Contributors.
"""Explicitly download needed Cartopy maps."""
from cartopy.io import config, Downloader

2 changes: 1 addition & 1 deletion ci/filter_links.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
# Copyright (c) 2021 MetPy Developers.
# Copyright (c) 2021 Siphon Contributors.
"""Filter links from Sphinx's linkcheck."""
import json
import subprocess
4 changes: 2 additions & 2 deletions ci/gen_versions_json.py
@@ -6,6 +6,6 @@

import glob

with open('versions.json', 'wt') as version_file:
version_strings = ','.join('"{}"'.format(d) for d in glob.glob('v*.[0-9]*'))
with open('versions.json', 'w') as version_file:
version_strings = ','.join(f'"{d}"' for d in glob.glob('v*.[0-9]*'))
version_file.write('{"versions":["latest","dev",' + version_strings + ']}\n')
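The rewrite above is typical pyupgrade-style cleanup: 'wt' means the same as 'w' (text mode is the default), and the str.format call becomes an f-string. A minimal check of the equivalence, using a toy value rather than anything from the script:

    d = 'v1.2'
    # Both produce the quoted string '"v1.2"'; ruff's pyupgrade rules prefer the f-string.
    assert '"{}"'.format(d) == f'"{d}"'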
21 changes: 21 additions & 0 deletions pyproject.toml
@@ -62,6 +62,27 @@ extras = [
"Bug Tracker" = "https://github.com/Unidata/siphon/issues"
"Source Code" = "https://github.com/Unidata/siphon"

[tool.doc8]
ignore-path = ["docs/build", "docs/api/generated", "docs/_templates", "docs/examples"]
file-encoding = "utf8"
max-line-length = 95

[tool.isort]
line_length = 95
known_first_party = ["siphon"]
force_single_line = false
reverse_relative = true
use_parentheses = true
force_sort_within_sections = true
order_by_type = false
sort_relative_in_force_sorted_sections = true
combine_as_imports = true
combine_star = true

[tool.pytest.ini_options]
norecursedirs = "build docs .idea"
doctest_optionflags = "NORMALIZE_WHITESPACE"

[tool.ruff]
line-length = 95
exclude = ["docs", "build", "src/siphon/cdmr/*_pb2.py"]
2 changes: 1 addition & 1 deletion setup.cfg
@@ -7,7 +7,7 @@ max-line-length = 95
rst-roles = class, data, doc, func, meth, mod
rst-directives = plot, versionchanged
known-modules = netcdf4:[netCDF4]
exclude = docs build
exclude = docs build src/siphon/cdmr/*_pb2.py
select = E301 E302 E303 E304 E305 E306 I R
ignore = F405 W503 RST902 SIM106
per-file-ignores = src/siphon/_version.py: I900
53 changes: 22 additions & 31 deletions src/siphon/catalog.py
@@ -11,13 +11,9 @@
from datetime import datetime
import logging
import re
from urllib.parse import urljoin, urlparse
import warnings
import xml.etree.ElementTree as ET # noqa:N814
try:
from urlparse import urljoin, urlparse
except ImportError:
# Python 3
from urllib.parse import urljoin, urlparse

from .http_util import session_manager
from .metadata import TDSCatalogMetadata
@@ -47,10 +43,7 @@ class DatasetCollection(IndexableMapping):
def _get_datasets_with_times(self, regex, strptime=None):
# Set the default regex if we don't have one
# If strptime is provided, pass the regex group named 'strptime' to strptime
if regex is None:
regex = self.default_regex
else:
regex = re.compile(regex)
regex = self.default_regex if regex is None else re.compile(regex)

# Loop over the collection looking for keys that match our regex
found_date = False
@@ -149,7 +142,7 @@ def filter_time_range(self, start, end, regex=None, strptime=None):
"""
if start > end:
warnings.warn('The provided start time comes after the end time. No data will '
'be returned.', UserWarning)
'be returned.', UserWarning, stacklevel=2)
return [item[-1] for item in self._get_datasets_with_times(regex, strptime)
if start <= item[0] <= end]
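A number of warnings.warn calls in this commit gain stacklevel=2, which attributes the warning to the caller of the siphon function rather than to the line inside siphon that issues it. A minimal sketch of the effect, with a hypothetical stand-in function:

    import warnings

    def filter_time_range(start, end):
        # With stacklevel=2 the reported file/line is the caller's, so the
        # user sees their own code in the warning, not this library line.
        if start > end:
            warnings.warn('The provided start time comes after the end time. '
                          'No data will be returned.', UserWarning, stacklevel=2)
        return []

    filter_time_range(2, 1)  # the emitted warning points at this call site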

@@ -289,8 +282,8 @@ def __init__(self, catalog_url):
if 'html' in resp.headers['content-type']:
import warnings
new_url = self.catalog_url.replace('html', 'xml')
warnings.warn('URL {} returned HTML. Changing to: {}'.format(self.catalog_url,
new_url))
warnings.warn(f'URL {self.catalog_url} returned HTML. Changing to: {new_url}',
stacklevel=2)
self.catalog_url = new_url
resp = self.session.get(self.catalog_url)
resp.raise_for_status()
@@ -362,9 +355,8 @@ def __del__(self):

def _process_dataset(self, element):
catalog_url = ''
if 'urlPath' in element.attrib:
if element.attrib['urlPath'] == 'latest.xml':
catalog_url = self.catalog_url
if 'urlPath' in element.attrib and element.attrib['urlPath'] == 'latest.xml':
catalog_url = self.catalog_url

ds = Dataset(element, catalog_url=catalog_url)
self.datasets[ds.name] = ds
@@ -524,24 +516,21 @@ def resolve_url(self, catalog_url):
resolver_xml = session_manager.urlopen(resolver_url)
tree = ET.parse(resolver_xml)
root = tree.getroot()
if 'name' in root.attrib:
self.catalog_name = root.attrib['name']
else:
self.catalog_name = 'No name found'
self.catalog_name = root.attrib.get('name', 'No name found')
resolved_url = ''
found = False
for child in root.iter():
if not found:
tag_type = child.tag.split('}')[-1]
if tag_type == 'dataset':
if 'urlPath' in child.attrib:
ds = Dataset(child)
resolved_url = ds.url_path
found = True
if tag_type == 'dataset' and 'urlPath' in child.attrib:
ds = Dataset(child)
resolved_url = ds.url_path
found = True
if found:
return resolved_url
else:
log.warning('no dataset url path found in latest.xml!')
return None

def make_access_urls(self, catalog_url, all_services, metadata=None):
"""Make fully qualified urls for the access methods enabled on the dataset.
@@ -611,9 +600,8 @@ def download(self, filename=None):
"""
if filename is None:
filename = self.name
with self.remote_open() as infile:
with open(filename, 'wb') as outfile:
outfile.write(infile.read())
with self.remote_open() as infile, open(filename, 'wb') as outfile:
outfile.write(infile.read())

def remote_open(self, mode='b', encoding='ascii', errors='ignore'):
"""Open the remote dataset for random access.
@@ -727,7 +715,8 @@ def access_with_service(self, service, use_xarray=None):
import xarray as xr
provider = lambda url: xr.open_dataset(CDMRemoteStore(url)) # noqa: E731
except ImportError:
raise ImportError('CdmRemote access needs xarray to be installed.')
raise ImportError('CdmRemote access needs xarray'
'to be installed.') from None
else:
from .cdmr import Dataset as CDMRDataset
provider = CDMRDataset
@@ -737,13 +726,15 @@ def access_with_service(self, service, use_xarray=None):
import xarray as xr
provider = xr.open_dataset
except ImportError:
raise ImportError('xarray needs to be installed if `use_xarray` is True.')
raise ImportError('xarray needs to be installed if '
'`use_xarray` is True.') from None
else:
try:
from netCDF4 import Dataset as NC4Dataset
provider = NC4Dataset
except ImportError:
raise ImportError('OPENDAP access needs netCDF4-python to be installed.')
raise ImportError('OPENDAP access needs netCDF4-python'
'to be installed.') from None
elif service in self.ncss_service_names:
from .ncss import NCSS
provider = NCSS
@@ -755,7 +746,7 @@ def access_with_service(self, service, use_xarray=None):
try:
return provider(self.access_urls[service])
except KeyError:
raise ValueError(service + ' is not available for this dataset')
raise ValueError(service + ' is not available for this dataset') from None

__repr__ = __str__
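The `from None` added to the re-raised exceptions above suppresses implicit exception chaining, so the user sees only the explanatory ImportError or ValueError instead of a "During handling of the above exception, another exception occurred" traceback. A minimal sketch with a hypothetical lookup, not siphon's actual API:

    def open_with_service(access_urls, service):
        try:
            return access_urls[service]
        except KeyError:
            # Without 'from None', the traceback would also show the KeyError.
            raise ValueError(service + ' is not available for this dataset') from None

    # Running this raises a single, unchained ValueError.
    open_with_service({'NCSS': 'https://example.invalid/ncss'}, 'OPENDAP')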

2 changes: 1 addition & 1 deletion src/siphon/cdmr/coveragedataset.py
@@ -29,7 +29,7 @@ def __init__(self, url):
"""Initialize CoverageDataset from a url pointing to CDMRemoteFeature endpoint."""
super().__init__()
warnings.warn('CoverageDataset is in early development, unsupported, and API may '
'change at any time.')
'change at any time.', stacklevel=2)
self.cdmrf = CDMRemoteFeature(url)
self.name = 'Unnamed'
self.lon_lat_domain = None
17 changes: 5 additions & 12 deletions src/siphon/cdmr/dataset.py
@@ -223,10 +223,9 @@ def _process_indices(self, ind):
except TypeError:
ind = [ind]

# Make sure we don't have too many things to index
if len(ind) > self.ndim:
# But allow a full slice on a scalar variable
if not (self.ndim == 0 and len(ind) == 1 and ind[0] == slice(None)):
# Make sure we don't have too many things to index, but allow a full slice on
# a scalar variable
if len(ind) > self.ndim and (self.ndim != 0 or len(ind) != 1 or ind[0] != slice(None)):
raise IndexError('Too many dimensions to index.')

# Expand to a slice/ellipsis for every dimension
@@ -260,15 +259,9 @@ def _process_indices(self, ind):

# Adjust start and stop to handle negative indexing
# and partial support for slicing beyond end.
if i.start is None:
start = 0
else:
start = self._adjust_index(dim, i.start)
start = 0 if i.start is None else self._adjust_index(dim, i.start)

if i.stop is None:
stop = self.shape[dim]
else:
stop = self._adjust_index(dim, i.stop)
stop = self.shape[dim] if i.stop is None else self._adjust_index(dim, i.stop)

# Need to create new slice for adjusted values
ind[dim] = slice(start, stop, i.step)
10 changes: 5 additions & 5 deletions src/siphon/cdmr/ncstream.py
@@ -10,8 +10,7 @@

import numpy as np

from . import cdmrfeature_pb2 as cdmrf
from . import ncStream_pb2 as stream # noqa
from . import cdmrfeature_pb2 as cdmrf, ncStream_pb2 as stream

MAGIC_HEADER = b'\xad\xec\xce\xda'
MAGIC_DATA = b'\xab\xec\xce\xba'
@@ -58,7 +57,9 @@ def read_ncstream_data(fobj):
# Handle decompressing the bytes
if data.compress == stream.DEFLATE:
bin_data = zlib.decompress(bin_data)
assert len(bin_data) == data.uncompressedSize
if len(bin_data) != data.uncompressedSize:
log.error('Uncompressed size mismatch %d vs. %d', len(bin_data),
data.uncompressedSize)
elif data.compress != stream.NONE:
raise NotImplementedError(f'Compression type {data.compress} not implemented!')

@@ -140,8 +141,7 @@ def read_messages(fobj, magic_table):
if func is not None:
messages.append(func(fobj))
else:
log.error('Unknown magic: ' + str(' '.join(f'{b: 02x}'
for b in bytearray(magic))))
log.error('Unknown magic: %s', ' '.join(f'{b: 02x}' for b in bytearray(magic)))

return messages
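Two recurring fixes appear in this file: the assert on the uncompressed size becomes an explicit check with a logged error (asserts are stripped when Python runs with -O), and log calls pass values as arguments instead of pre-building the message. A small sketch of the deferred-formatting style, using a throwaway logger rather than siphon's:

    import logging

    log = logging.getLogger('ncstream-demo')
    magic = bytearray(b'\xab\xec\xce\xba')

    # The join argument is still evaluated here, but the final %-interpolation
    # is left to the logging machinery and skipped if no handler emits the record.
    log.error('Unknown magic: %s', ' '.join(f'{b:02x}' for b in magic))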

1 change: 1 addition & 0 deletions src/siphon/cdmr/xarray_support.py
@@ -6,6 +6,7 @@
from xarray import Variable
from xarray.backends.common import AbstractDataStore, BackendArray
from xarray.core import indexing

try:
from xarray.core.utils import FrozenDict
except ImportError:
8 changes: 2 additions & 6 deletions src/siphon/http_util.py
@@ -9,12 +9,8 @@
from io import BytesIO
from itertools import chain
import posixpath
from urllib.parse import urlencode, urljoin # noqa: F401
import warnings
try:
from urllib.parse import urlencode, urljoin # noqa
except ImportError:
from urllib import urlencode
from urlparse import urljoin # noqa

import requests

@@ -324,7 +320,7 @@ def time_range(self, start, end):
"""
if start > end:
warnings.warn('The provided start time comes after the end time. No data will '
'be returned.', UserWarning)
'be returned.', UserWarning, stacklevel=2)
self._set_query(self.time_query, time_start=self._format_time(start),
time_end=self._format_time(end))
return self
21 changes: 5 additions & 16 deletions src/siphon/metadata.py
@@ -166,10 +166,7 @@ def handle_dataType(self, element): # noqa
class _ComplexTypes:
@staticmethod
def _get_tag_name(element):
if '}' in element.tag:
element_name = element.tag.split('}')[-1]
else:
element_name = element.tag
element_name = element.tag.split('}')[-1] if '}' in element.tag else element.tag
return element_name

@staticmethod
@@ -468,10 +465,7 @@ def __init__(self, element, metadata_in=None):
inherited = False
if 'inherited' in element.attrib:
inherited = element.attrib['inherited']
if inherited == 'true':
inherited = True
else:
inherited = False
inherited = inherited == 'true'

if metadata_in and (inherited or self._is_external_metadata_doc(element)):
# only inherit metadata passed in if the new metadata
@@ -491,10 +485,7 @@

@staticmethod
def _get_tag_name(element):
if '}' in element.tag:
element_name = element.tag.split('}')[-1]
else:
element_name = element.tag
element_name = element.tag.split('}')[-1] if '}' in element.tag else element.tag
return element_name

@staticmethod
@@ -512,6 +503,7 @@ def _get_handler(self, handler_name):
return getattr(self._st, handler_name)
else:
log.warning('cannot find handler for element %s', handler_name)
return None

def _parse_element(self, element):

@@ -572,10 +564,7 @@ def _parse_documentation(self, element):
md = self.metadata
md.setdefault('documentation', {})
if known or plain_doc:
if known:
doc_type = element.attrib['type']
else:
doc_type = 'generic'
doc_type = element.attrib['type'] if known else 'generic'
md['documentation'].setdefault(doc_type, []).append(element.text)
elif xlink_href_attr in element.attrib:
title = element.attrib[xlink_title_attr]
9 changes: 5 additions & 4 deletions src/siphon/ncss.py
@@ -347,7 +347,7 @@ def combine_xml_points(seq, units, handle_units):

def parse_xml_dataset(elem, handle_units):
"""Create a netCDF-like dataset from XML data."""
points, units = zip(*[parse_xml_point(p) for p in elem.findall('point')])
points, units = zip(*[parse_xml_point(p) for p in elem.findall('point')], strict=False)
# Group points by the contents of each point
datasets = {}
for p in points:
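The explicit strict=False above satisfies ruff's zip-without-explicit-strict check (the flag exists on Python 3.10+) while keeping the old behaviour of truncating to the shortest input. A toy transpose in the same shape as the call above, not using siphon data:

    point_unit_pairs = [({'temp': 280.0}, {'temp': 'K'}),
                        ({'temp': 281.5}, {'temp': 'K'})]

    # strict=False quietly stops at the shortest input; strict=True would
    # raise ValueError if the unpacked tuples ever differed in length.
    points, units = zip(*point_unit_pairs, strict=False)
    print(points)  # ({'temp': 280.0}, {'temp': 281.5})
    print(units)   # ({'temp': 'K'}, {'temp': 'K'})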
@@ -359,9 +360,10 @@

# Handling of netCDF 3/4 from NCSS
try:
from netCDF4 import Dataset
from tempfile import NamedTemporaryFile

from netCDF4 import Dataset

@response_handlers.register('application/x-netcdf')
@response_handlers.register('application/x-netcdf4')
def read_netcdf(data, handle_units): # pylint:disable=unused-argument
Expand All @@ -381,7 +382,7 @@ def read_netcdf(data, handle_units): # pylint:disable=unused-argument
except ImportError:
import warnings
warnings.warn('netCDF4 module not installed. '
'Will be unable to handle NetCDF returns from NCSS.')
'Will be unable to handle NetCDF returns from NCSS.', stacklevel=2)


def deletetempfile(fname):
@@ -395,7 +396,7 @@ def deletetempfile(fname):
import warnings
warnings.warn('temporary netcdf dataset file not deleted. '
'to delete temporary dataset file in the future '
'be sure to use dataset.close() when finished.')
'be sure to use dataset.close() when finished.', stacklevel=2)


# Parsing of CSV data returned from NCSS