From 72ae5bfdcbaedd0b2075570d63de56531364bec8 Mon Sep 17 00:00:00 2001 From: "John T. Wodder II" Date: Mon, 18 Dec 2023 16:11:03 -0500 Subject: [PATCH] Update for Pydantic v2 --- dandi/cli/cmd_download.py | 6 +- dandi/cli/cmd_ls.py | 6 +- dandi/cli/cmd_service_scripts.py | 2 +- .../update_dandiset_from_doi/biorxiv.json | 2 +- .../data/update_dandiset_from_doi/elife.json | 2 +- .../update_dandiset_from_doi/jneurosci.json | 2 +- .../data/update_dandiset_from_doi/nature.json | 2 +- .../data/update_dandiset_from_doi/neuron.json | 2 +- dandi/dandiapi.py | 73 +++++++++---------- dandi/dandiarchive.py | 7 +- dandi/files/bases.py | 4 +- dandi/files/bids.py | 9 ++- dandi/metadata/core.py | 7 +- dandi/metadata/util.py | 10 +-- .../data/metadata/metadata2asset_simple1.json | 2 +- dandi/tests/test_dandiapi.py | 2 +- dandi/tests/test_metadata.py | 51 ++++++------- dandi/upload.py | 2 +- dandi/utils.py | 57 ++++++++++----- 19 files changed, 137 insertions(+), 111 deletions(-) diff --git a/dandi/cli/cmd_download.py b/dandi/cli/cmd_download.py index 7bd4bf76d..ee43459d6 100644 --- a/dandi/cli/cmd_download.py +++ b/dandi/cli/cmd_download.py @@ -9,7 +9,7 @@ from ..dandiarchive import _dandi_url_parser, parse_dandi_url from ..dandiset import Dandiset from ..download import DownloadExisting, DownloadFormat, PathType -from ..utils import get_instance +from ..utils import get_instance, joinurl # The use of f-strings apparently makes this not a proper docstring, and so @@ -131,9 +131,9 @@ def download( pass else: if instance.gui is not None: - url = [f"{instance.gui}/#/dandiset/{dandiset_id}/draft"] + url = [joinurl(instance.gui, f"/#/dandiset/{dandiset_id}/draft")] else: - url = [f"{instance.api}/dandisets/{dandiset_id}/"] + url = [joinurl(instance.api, f"/dandisets/{dandiset_id}/")] return download.download( url, diff --git a/dandi/cli/cmd_ls.py b/dandi/cli/cmd_ls.py index 72e933aad..eeb3dbecd 100644 --- a/dandi/cli/cmd_ls.py +++ b/dandi/cli/cmd_ls.py @@ -96,8 +96,8 @@ def ls( all_fields = tuple( sorted( set(common_fields) - | models.Dandiset.__fields__.keys() - | models.Asset.__fields__.keys() + | models.Dandiset.model_fields.keys() + | models.Asset.model_fields.keys() ) ) else: @@ -345,7 +345,7 @@ def fn(): path, schema_version=schema, digest=Digest.dandi_etag(digest), - ).json_dict() + ).model_dump(mode="json", exclude_none=True) else: if path.endswith(tuple(ZARR_EXTENSIONS)): if use_fake_digest: diff --git a/dandi/cli/cmd_service_scripts.py b/dandi/cli/cmd_service_scripts.py index e8a2c4b11..9e25b13db 100644 --- a/dandi/cli/cmd_service_scripts.py +++ b/dandi/cli/cmd_service_scripts.py @@ -104,7 +104,7 @@ def reextract_metadata(url: str, diff: bool, when: str) -> None: lgr.info("Extracting new metadata for asset") metadata = nwb2asset(asset.as_readable(), digest=digest) metadata.path = asset.path - mddict = metadata.json_dict() + mddict = metadata.model_dump(mode="json", exclude_none=True) if diff: oldmd = asset.get_raw_metadata() oldmd_str = yaml_dump(oldmd) diff --git a/dandi/cli/tests/data/update_dandiset_from_doi/biorxiv.json b/dandi/cli/tests/data/update_dandiset_from_doi/biorxiv.json index eb6a40f38..6fe83f612 100644 --- a/dandi/cli/tests/data/update_dandiset_from_doi/biorxiv.json +++ b/dandi/cli/tests/data/update_dandiset_from_doi/biorxiv.json @@ -386,7 +386,7 @@ "includeInCitation": true } ], - "dateCreated": "2023-04-25T16:28:26.500181+00:00", + "dateCreated": "2023-04-25T16:28:26.500181Z", "description": "Progress in science requires standardized assays whose results can be readily shared, 
compared, and reproduced across laboratories. Reproducibility, however, has been a concern in neuroscience, particularly for measurements of mouse behavior. Here we show that a standardized task to probe decision-making in mice produces reproducible results across multiple laboratories. We designed a task for head-fixed mice that combines established assays of perceptual and value-based decision making, and we standardized training protocol and experimental hardware, software, and procedures. We trained 140 mice across seven laboratories in three countries, and we collected 5 million mouse choices into a publicly available database. Learning speed was variable across mice and laboratories, but once training was complete there were no significant differences in behavior across laboratories. Mice in different laboratories adopted similar reliance on visual stimuli, on past successes and failures, and on estimates of stimulus prior probability to guide their choices. These results reveal that a complex mouse behavior can be successfully reproduced across multiple laboratories. They establish a standard for reproducible rodent behavior, and provide an unprecedented dataset and open-access tools to study decision-making in mice. More generally, they indicate a path towards achieving reproducibility in neuroscience through collaborative open-science approaches.", "assetsSummary": { "schemaKey": "AssetsSummary", diff --git a/dandi/cli/tests/data/update_dandiset_from_doi/elife.json b/dandi/cli/tests/data/update_dandiset_from_doi/elife.json index 7008fa0ab..54dbf581f 100644 --- a/dandi/cli/tests/data/update_dandiset_from_doi/elife.json +++ b/dandi/cli/tests/data/update_dandiset_from_doi/elife.json @@ -105,7 +105,7 @@ "includeInCitation": true } ], - "dateCreated": "2023-04-25T16:28:30.453019+00:00", + "dateCreated": "2023-04-25T16:28:30.453019Z", "description": "Proprioception, the sense of body position, movement, and associated forces, remains poorly understood, despite its critical role in movement. Most studies of area 2, a proprioceptive area of somatosensory cortex, have simply compared neurons\u2019 activities to the movement of the hand through space. Using motion tracking, we sought to elaborate this relationship by characterizing how area 2 activity relates to whole arm movements. We found that a whole-arm model, unlike classic models, successfully predicted how features of neural activity changed as monkeys reached to targets in two workspaces. However, when we then evaluated this whole-arm model across active and passive movements, we found that many neurons did not consistently represent the whole arm over both conditions. These results suggest that 1) neural activity in area 2 includes representation of the whole arm during reaching and 2) many of these neurons represented limb state differently during active and passive movements.", "assetsSummary": { "schemaKey": "AssetsSummary", diff --git a/dandi/cli/tests/data/update_dandiset_from_doi/jneurosci.json b/dandi/cli/tests/data/update_dandiset_from_doi/jneurosci.json index ebed2dcaf..14f081ebd 100644 --- a/dandi/cli/tests/data/update_dandiset_from_doi/jneurosci.json +++ b/dandi/cli/tests/data/update_dandiset_from_doi/jneurosci.json @@ -45,7 +45,7 @@ "includeInCitation": true } ], - "dateCreated": "2023-04-25T16:28:28.308094+00:00", + "dateCreated": "2023-04-25T16:28:28.308094Z", "description": "Reinforcement learning theory plays a key role in understanding the behavioral and neural mechanisms of choice behavior in animals and humans. 
Especially, intermediate variables of learning models estimated from behavioral data, such as the expectation of reward for each candidate choice (action value), have been used in searches for the neural correlates of computational elements in learning and decision making. The aims of the present study are as follows: (1) to test which computational model best captures the choice learning process in animals and (2) to elucidate how action values are represented in different parts of the corticobasal ganglia circuit. We compared different behavioral learning algorithms to predict the choice sequences generated by rats during a free-choice task and analyzed associated neural activity in the nucleus accumbens (NAc) and ventral pallidum (VP). The major findings of this study were as follows: (1) modified versions of an action\u2013value learning model captured a variety of choice strategies of rats, including win-stay\u2013lose-switch and persevering behavior, and predicted rats' choice sequences better than the best multistep Markov model; and (2) information about action values and future actions was coded in both the NAc and VP, but was less dominant than information about trial types, selected actions, and reward outcome. The results of our model-based analysis suggest that the primary role of the NAc and VP is to monitor information important for updating choice behaviors. Information represented in the NAc and VP might contribute to a choice mechanism that is situated elsewhere.", "assetsSummary": { "schemaKey": "AssetsSummary", diff --git a/dandi/cli/tests/data/update_dandiset_from_doi/nature.json b/dandi/cli/tests/data/update_dandiset_from_doi/nature.json index 17f456374..ce5449d09 100644 --- a/dandi/cli/tests/data/update_dandiset_from_doi/nature.json +++ b/dandi/cli/tests/data/update_dandiset_from_doi/nature.json @@ -46,7 +46,7 @@ "includeInCitation": true } ], - "dateCreated": "2023-04-25T16:28:31.601155+00:00", + "dateCreated": "2023-04-25T16:28:31.601155Z", "description": "AbstractSpatial cognition depends on an accurate representation of orientation within an environment. Head direction cells in distributed brain regions receive a range of sensory inputs, but visual input is particularly important for aligning their responses to environmental landmarks. To investigate how population-level heading responses are aligned to visual input, we recorded from retrosplenial cortex (RSC) of head-fixed mice in a moving environment using two-photon calcium imaging. We show that RSC neurons are tuned to the animal\u2019s relative orientation in the environment, even in the absence of head movement. Next, we found that RSC receives functionally distinct projections from visual and thalamic areas and contains several functional classes of neurons. While some functional classes mirror RSC inputs, a newly discovered class coregisters visual and thalamic signals. Finally, decoding analyses reveal unique contributions to heading from each class. 
Our results suggest an RSC circuit for anchoring heading representations to environmental visual landmarks.", "assetsSummary": { "schemaKey": "AssetsSummary", diff --git a/dandi/cli/tests/data/update_dandiset_from_doi/neuron.json b/dandi/cli/tests/data/update_dandiset_from_doi/neuron.json index 87264d4a1..82a81a7fa 100644 --- a/dandi/cli/tests/data/update_dandiset_from_doi/neuron.json +++ b/dandi/cli/tests/data/update_dandiset_from_doi/neuron.json @@ -45,7 +45,7 @@ "includeInCitation": true } ], - "dateCreated": "2023-04-25T16:28:29.373034+00:00", + "dateCreated": "2023-04-25T16:28:29.373034Z", "description": "A test Dandiset", "assetsSummary": { "schemaKey": "AssetsSummary", diff --git a/dandi/dandiapi.py b/dandi/dandiapi.py index 7e9ab1178..ff93c5a3a 100644 --- a/dandi/dandiapi.py +++ b/dandi/dandiapi.py @@ -13,7 +13,7 @@ import re from time import sleep, time from types import TracebackType -from typing import TYPE_CHECKING, Any, ClassVar, Dict, FrozenSet, List, Optional +from typing import TYPE_CHECKING, Any, Dict, List, Optional from urllib.parse import quote_plus, urlparse, urlunparse import click @@ -44,6 +44,7 @@ get_instance, is_interactive, is_page2_url, + joinurl, ) if TYPE_CHECKING: @@ -285,16 +286,12 @@ def request( def get_url(self, path: str) -> str: """ Append a slash-separated ``path`` to the instance's base URL. The two - components are separated by a single slash, and any trailing slashes - are removed. + components are separated by a single slash, removing any excess slashes + that would be present after naïve concatenation. If ``path`` is already an absolute URL, it is returned unchanged. """ - # Construct the url - if path.lower().startswith(("http://", "https://")): - return path - else: - return self.api_url.rstrip("/") + "/" + path.lstrip("/") + return joinurl(self.api_url, path) def get(self, path: str, **kwargs: Any) -> Any: """ @@ -614,7 +611,8 @@ def get_asset(self, asset_id: str) -> BaseRemoteAsset: return BaseRemoteAsset.from_base_data(self, info, metadata) -class APIBase(BaseModel): +# `arbitrary_types_allowed` is needed for `client: DandiAPIClient` +class APIBase(BaseModel, populate_by_name=True, arbitrary_types_allowed=True): """ Base class for API objects implemented in pydantic. @@ -622,21 +620,12 @@ class APIBase(BaseModel): detail; do not rely on it. 
""" - JSON_EXCLUDE: ClassVar[FrozenSet[str]] = frozenset(["client"]) - def json_dict(self) -> dict[str, Any]: """ Convert to a JSONable `dict`, omitting the ``client`` attribute and using the same field names as in the API """ - data = json.loads(self.json(exclude=self.JSON_EXCLUDE, by_alias=True)) - assert isinstance(data, dict) - return data - - class Config: - allow_population_by_field_name = True - # To allow `client: Session`: - arbitrary_types_allowed = True + return self.model_dump(mode="json", by_alias=True) class Version(APIBase): @@ -710,7 +699,7 @@ class RemoteDandisetData(APIBase): modified: datetime contact_person: str embargo_status: EmbargoStatus - most_recent_published_version: Optional[Version] + most_recent_published_version: Optional[Version] = None draft_version: Version @@ -752,7 +741,7 @@ def __init__( self._version = version self._data: RemoteDandisetData | None if data is not None: - self._data = RemoteDandisetData.parse_obj(data) + self._data = RemoteDandisetData.model_validate(data) else: self._data = None @@ -762,7 +751,7 @@ def __str__(self) -> str: def _get_data(self) -> RemoteDandisetData: if self._data is None: try: - self._data = RemoteDandisetData.parse_obj( + self._data = RemoteDandisetData.model_validate( self.client.get(f"/dandisets/{self.identifier}/") ) except HTTP404Error: @@ -875,9 +864,9 @@ def from_data(cls, client: DandiAPIClient, data: dict[str, Any]) -> RemoteDandis when acquiring data using means outside of this library. """ if data.get("most_recent_published_version") is not None: - version = Version.parse_obj(data["most_recent_published_version"]) + version = Version.model_validate(data["most_recent_published_version"]) else: - version = Version.parse_obj(data["draft_version"]) + version = Version.model_validate(data["draft_version"]) return cls( client=client, identifier=data["identifier"], version=version, data=data ) @@ -917,7 +906,7 @@ def get_versions(self, order: str | None = None) -> Iterator[Version]: for v in self.client.paginate( f"{self.api_path}versions/", params={"order": order} ): - yield Version.parse_obj(v) + yield Version.model_validate(v) except HTTP404Error: raise NotFoundError(f"No such Dandiset: {self.identifier!r}") @@ -932,7 +921,7 @@ def get_version(self, version_id: str) -> VersionInfo: `Version`. 
""" try: - return VersionInfo.parse_obj( + return VersionInfo.model_validate( self.client.get( f"/dandisets/{self.identifier}/versions/{version_id}/info/" ) @@ -972,7 +961,7 @@ def get_metadata(self) -> models.Dandiset: Fetch the metadata for this version of the Dandiset as a `dandischema.models.Dandiset` instance """ - return models.Dandiset.parse_obj(self.get_raw_metadata()) + return models.Dandiset.model_validate(self.get_raw_metadata()) def get_raw_metadata(self) -> dict[str, Any]: """ @@ -990,7 +979,7 @@ def set_metadata(self, metadata: models.Dandiset) -> None: """ Set the metadata for this version of the Dandiset to the given value """ - self.set_raw_metadata(metadata.json_dict()) + self.set_raw_metadata(metadata.model_dump(mode="json", exclude_none=True)) def set_raw_metadata(self, metadata: dict[str, Any]) -> None: """ @@ -1043,7 +1032,7 @@ def publish(self, max_time: float = 120) -> RemoteDandiset: ) start = time() while time() - start < max_time: - v = Version.parse_obj(self.client.get(f"{draft_api_path}info/")) + v = Version.model_validate(self.client.get(f"{draft_api_path}info/")) if v.status is VersionStatus.PUBLISHED: break sleep(0.5) @@ -1267,7 +1256,7 @@ class BaseRemoteAsset(ABC, APIBase): #: The `DandiAPIClient` instance that returned this `BaseRemoteAsset` #: and which the latter will use for API requests - client: DandiAPIClient + client: DandiAPIClient = Field(exclude=True) #: The asset identifier identifier: str = Field(alias="asset_id") #: The asset's (forward-slash-separated) path @@ -1288,6 +1277,15 @@ def __init__(self, **data: Any) -> None: # type: ignore[no-redef] # underscores, so we have to do it ourselves. self._metadata = data.get("metadata", data.get("_metadata")) + def __eq__(self, other: Any) -> bool: + if type(self) is type(other): + # dict() includes fields with `exclude=True` (which are absent from + # the return value of `model_dump()`) but not private fields. We + # want to compare the former but not the latter. + return dict(self) == dict(other) + else: + return NotImplemented + def __str__(self) -> str: return f"{self.client._instance_id}:assets/{self.identifier}" @@ -1348,7 +1346,7 @@ def get_metadata(self) -> models.Asset: Fetch the metadata for the asset as a `dandischema.models.Asset` instance """ - return models.Asset.parse_obj(self.get_raw_metadata()) + return models.Asset.model_validate(self.get_raw_metadata()) def get_raw_metadata(self) -> dict[str, Any]: """Fetch the metadata for the asset as an unprocessed `dict`""" @@ -1598,7 +1596,7 @@ def iterfiles(self, prefix: str | None = None) -> Iterator[RemoteZarrEntry]: for r in self.client.paginate( f"{self.client.api_url}/zarr/{self.zarr}/files", params={"prefix": prefix} ): - data = ZarrEntryServerData.parse_obj(r) + data = ZarrEntryServerData.model_validate(r) yield RemoteZarrEntry.from_server_data(self, data) def get_entry_by_path(self, path: str) -> RemoteZarrEntry: @@ -1655,13 +1653,12 @@ class RemoteAsset(BaseRemoteAsset): `RemoteDandiset`. """ - JSON_EXCLUDE = frozenset(["client", "dandiset_id", "version_id"]) - #: The identifier for the Dandiset to which the asset belongs - dandiset_id: str + dandiset_id: str = Field(exclude=True) + #: The identifier for the version of the Dandiset to which the asset #: belongs - version_id: str + version_id: str = Field(exclude=True) @classmethod def from_data( @@ -1726,7 +1723,9 @@ def set_metadata(self, metadata: models.Asset) -> None: Set the metadata for the asset to the given value and update the `RemoteAsset` in place. 
""" - return self.set_raw_metadata(metadata.json_dict()) + return self.set_raw_metadata( + metadata.model_dump(mode="json", exclude_none=True) + ) @abstractmethod def set_raw_metadata(self, metadata: dict[str, Any]) -> None: diff --git a/dandi/dandiarchive.py b/dandi/dandiarchive.py index 3a849bbe9..53c3506cb 100644 --- a/dandi/dandiarchive.py +++ b/dandi/dandiarchive.py @@ -37,7 +37,7 @@ from typing import Any from urllib.parse import unquote as urlunquote -from pydantic import AnyHttpUrl, parse_obj_as +from pydantic import AnyHttpUrl, TypeAdapter import requests from . import get_logger @@ -82,9 +82,8 @@ class ParsedDandiURL(ABC): def api_url(self) -> AnyHttpUrl: """The base URL of the Dandi API service, without a trailing slash""" # Kept for backwards compatibility - r = parse_obj_as(AnyHttpUrl, self.instance.api.rstrip("/")) - assert isinstance(r, AnyHttpUrl) - return r # type: ignore[no-any-return] + adapter = TypeAdapter(AnyHttpUrl) + return adapter.validate_python(self.instance.api.rstrip("/")) def get_client(self) -> DandiAPIClient: """ diff --git a/dandi/files/bases.py b/dandi/files/bases.py index 45f5929f5..aa1333950 100644 --- a/dandi/files/bases.py +++ b/dandi/files/bases.py @@ -98,7 +98,7 @@ def get_metadata( """Return the Dandiset metadata inside the file""" with open(self.filepath) as f: meta = yaml_load(f, typ="safe") - return DandisetMeta.unvalidated(**meta) + return DandisetMeta.model_construct(**meta) # TODO: @validate_cache.memoize_path def get_validation_errors( @@ -184,7 +184,7 @@ def get_validation_errors( ) try: asset = self.get_metadata(digest=self._DUMMY_DIGEST) - BareAsset(**asset.dict()) + BareAsset(**asset.model_dump()) except ValidationError as e: if devel_debug: raise diff --git a/dandi/files/bids.py b/dandi/files/bids.py index 0ab0784f0..8541b5030 100644 --- a/dandi/files/bids.py +++ b/dandi/files/bids.py @@ -94,7 +94,9 @@ def _validate(self) -> None: ) # Don't apply eta-reduction to the lambda, as mypy needs to be # assured that defaultdict's argument takes no parameters. - self._asset_metadata = defaultdict(lambda: BareAsset.unvalidated()) + self._asset_metadata = defaultdict( + lambda: BareAsset.model_construct() # type: ignore[call-arg] + ) for result in results: if result.id in BIDS_ASSET_ERRORS: assert result.path @@ -230,7 +232,10 @@ def get_metadata( bids_metadata = BIDSAsset.get_metadata(self, digest, ignore_errors) nwb_metadata = NWBAsset.get_metadata(self, digest, ignore_errors) return BareAsset( - **{**bids_metadata.dict(), **nwb_metadata.dict(exclude_none=True)} + **{ + **bids_metadata.model_dump(), + **nwb_metadata.model_dump(exclude_none=True), + } ) diff --git a/dandi/metadata/core.py b/dandi/metadata/core.py index 3e97dbc24..76355a76b 100644 --- a/dandi/metadata/core.py +++ b/dandi/metadata/core.py @@ -5,7 +5,7 @@ import re from dandischema import models -from pydantic import ByteSize, parse_obj_as +from pydantic import ByteSize, TypeAdapter from .util import extract_model, get_generator from .. 
import get_logger @@ -18,7 +18,7 @@ def get_default_metadata( path: str | Path | Readable, digest: Digest | None = None ) -> models.BareAsset: - metadata = models.BareAsset.unvalidated() + metadata = models.BareAsset.model_construct() # type: ignore[call-arg] start_time = end_time = datetime.now().astimezone() add_common_metadata(metadata, path, start_time, end_time, digest) return metadata @@ -56,7 +56,8 @@ def add_common_metadata( ) if m: size = int(m["size"]) - metadata.contentSize = parse_obj_as(ByteSize, size) + adapter = TypeAdapter(ByteSize) + metadata.contentSize = adapter.validate_python(size) if metadata.wasGeneratedBy is None: metadata.wasGeneratedBy = [] metadata.wasGeneratedBy.append(get_generator(start_time, end_time)) diff --git a/dandi/metadata/util.py b/dandi/metadata/util.py index cfc57bb49..b9cb33aa5 100644 --- a/dandi/metadata/util.py +++ b/dandi/metadata/util.py @@ -498,13 +498,13 @@ def extract_anatomy(metadata: dict) -> list[models.Anatomy] | None: def extract_model(modelcls: type[M], metadata: dict, **kwargs: Any) -> M: - m = modelcls.unvalidated() - for field in m.__fields__.keys(): + m = modelcls.model_construct() + for field in m.model_fields.keys(): value = kwargs.get(field, extract_field(field, metadata)) if value is not None: setattr(m, field, value) - # return modelcls(**m.dict()) - return m + # I can't figure out why mypy doesn't like this line: + return m # type: ignore[return-value] def extract_model_list( @@ -514,7 +514,7 @@ def func(metadata: dict) -> list[M]: m = extract_model( modelcls, metadata, **{id_field: metadata.get(id_source)}, **kwargs ) - if all(v is None for k, v in m.dict().items() if k != "schemaKey"): + if all(v is None for k, v in m.model_dump().items() if k != "schemaKey"): return [] else: return [m] diff --git a/dandi/tests/data/metadata/metadata2asset_simple1.json b/dandi/tests/data/metadata/metadata2asset_simple1.json index 9c742c1b9..04babc1a1 100644 --- a/dandi/tests/data/metadata/metadata2asset_simple1.json +++ b/dandi/tests/data/metadata/metadata2asset_simple1.json @@ -18,7 +18,7 @@ "identifier": "session_id1", "name": "session_id1", "description": "session_description1", - "startDate": "2017-04-15T12:00:00+00:00" + "startDate": "2017-04-15T12:00:00Z" } ], "contentSize": 69105, diff --git a/dandi/tests/test_dandiapi.py b/dandi/tests/test_dandiapi.py index 4d354b809..077f16ce7 100644 --- a/dandi/tests/test_dandiapi.py +++ b/dandi/tests/test_dandiapi.py @@ -496,7 +496,7 @@ def test_set_asset_metadata(text_dandiset: SampleDandiset) -> None: md = asset.get_metadata() md.blobDateModified = datetime(2038, 1, 19, 3, 14, 7, tzinfo=timezone.utc) asset.set_metadata(md) - assert asset.get_raw_metadata()["blobDateModified"] == "2038-01-19T03:14:07+00:00" + assert asset.get_raw_metadata()["blobDateModified"] == "2038-01-19T03:14:07Z" def test_remote_dandiset_json_dict(text_dandiset: SampleDandiset) -> None: diff --git a/dandi/tests/test_metadata.py b/dandi/tests/test_metadata.py index 4c0e3bf0b..12ce1a51f 100644 --- a/dandi/tests/test_metadata.py +++ b/dandi/tests/test_metadata.py @@ -27,6 +27,7 @@ ) from dandischema.models import Dandiset as DandisetMeta from dateutil.tz import tzutc +from pydantic import ByteSize import pytest from semantic_version import Version @@ -397,7 +398,7 @@ def test_timedelta2duration(td: timedelta, duration: str) -> None: ], ) def test_prepare_metadata(filename: str, metadata: dict[str, Any]) -> None: - data = prepare_metadata(metadata).json_dict() + data = prepare_metadata(metadata).model_dump(mode="json", 
exclude_none=True) with (METADATA_DIR / filename).open() as fp: data_as_dict = json.load(fp) data_as_dict["schemaVersion"] = DANDI_SCHEMA_VERSION @@ -492,7 +493,7 @@ def test_parseobourl(url, value): @mark.skipif_no_network def test_species(): m = {"species": "http://purl.obolibrary.org/obo/NCBITaxon_28584"} - assert extract_species(m).json_dict() == { + assert extract_species(m).model_dump(mode="json", exclude_none=True) == { "identifier": "http://purl.obolibrary.org/obo/NCBITaxon_28584", "schemaKey": "SpeciesType", "name": "Drosophila suzukii", @@ -772,7 +773,7 @@ def test_species_map(): ], ) def test_ndtypes(ndtypes, asset_dict): - metadata = BareAsset.unvalidated( + metadata = BareAsset( contentSize=1, encodingFormat="application/x-nwb", digest={DigestType.dandi_etag: "0" * 32 + "-1"}, @@ -790,24 +791,24 @@ def test_ndtypes(ndtypes, asset_dict): @mark.skipif_no_network def test_nwb2asset(simple2_nwb: Path) -> None: - assert nwb2asset(simple2_nwb, digest=DUMMY_DANDI_ETAG) == BareAsset.unvalidated( + assert nwb2asset(simple2_nwb, digest=DUMMY_DANDI_ETAG) == BareAsset.model_construct( schemaKey="Asset", schemaVersion=DANDI_SCHEMA_VERSION, keywords=["keyword1", "keyword 2"], access=[ - AccessRequirements.unvalidated( + AccessRequirements( schemaKey="AccessRequirements", status=AccessType.OpenAccess ) ], wasGeneratedBy=[ - Session.unvalidated( + Session.model_construct( schemaKey="Session", identifier="session_id1", name="session_id1", description="session_description1", startDate=ANY_AWARE_DATETIME, ), - Activity.unvalidated( + Activity.model_construct( id=AnyFullmatch( r"urn:uuid:[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" ), @@ -817,7 +818,7 @@ def test_nwb2asset(simple2_nwb: Path) -> None: startDate=ANY_AWARE_DATETIME, endDate=ANY_AWARE_DATETIME, wasAssociatedWith=[ - Software.unvalidated( + Software( schemaKey="Software", identifier="RRID:SCR_019009", name="DANDI Command Line Interface", @@ -827,27 +828,27 @@ def test_nwb2asset(simple2_nwb: Path) -> None: ], ), ], - contentSize=19664, + contentSize=ByteSize(19664), encodingFormat="application/x-nwb", digest={DigestType.dandi_etag: "dddddddddddddddddddddddddddddddd-1"}, path=str(simple2_nwb), dateModified=ANY_AWARE_DATETIME, blobDateModified=ANY_AWARE_DATETIME, wasAttributedTo=[ - Participant.unvalidated( + Participant( identifier="mouse001", schemaKey="Participant", - age=PropertyValue.unvalidated( + age=PropertyValue( schemaKey="PropertyValue", unitText="ISO-8601 duration", value="P135DT43200S", - valueReference=PropertyValue.unvalidated( + valueReference=PropertyValue( schemaKey="PropertyValue", value=AgeReferenceType.BirthReference, ), ), - sex=SexType.unvalidated(schemaKey="SexType", name="Unknown"), - species=SpeciesType.unvalidated( + sex=SexType(schemaKey="SexType", name="Unknown"), + species=SpeciesType( schemaKey="SpeciesType", identifier="http://purl.obolibrary.org/obo/NCBITaxon_10090", name="Mus musculus - House mouse", @@ -867,24 +868,24 @@ def test_nwb2asset_remote_asset(nwb_dandiset: SampleDandiset) -> None: mtime = ensure_datetime(asset.get_raw_metadata()["blobDateModified"]) assert isinstance(asset, RemoteBlobAsset) r = asset.as_readable() - assert nwb2asset(r, digest=digest) == BareAsset.unvalidated( + assert nwb2asset(r, digest=digest) == BareAsset.model_construct( schemaKey="Asset", schemaVersion=DANDI_SCHEMA_VERSION, keywords=["keyword1", "keyword 2"], access=[ - AccessRequirements.unvalidated( + AccessRequirements( schemaKey="AccessRequirements", status=AccessType.OpenAccess ) ], wasGeneratedBy=[ 
- Session.unvalidated( + Session.model_construct( schemaKey="Session", identifier="session_id1", name="session_id1", description="session_description1", startDate=ANY_AWARE_DATETIME, ), - Activity.unvalidated( + Activity.model_construct( id=AnyFullmatch( r"urn:uuid:[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" ), @@ -894,7 +895,7 @@ def test_nwb2asset_remote_asset(nwb_dandiset: SampleDandiset) -> None: startDate=ANY_AWARE_DATETIME, endDate=ANY_AWARE_DATETIME, wasAssociatedWith=[ - Software.unvalidated( + Software( schemaKey="Software", identifier="RRID:SCR_019009", name="DANDI Command Line Interface", @@ -904,27 +905,27 @@ def test_nwb2asset_remote_asset(nwb_dandiset: SampleDandiset) -> None: ], ), ], - contentSize=asset.size, + contentSize=ByteSize(asset.size), encodingFormat="application/x-nwb", digest={DigestType.dandi_etag: digest.value}, path="sub-mouse001.nwb", dateModified=ANY_AWARE_DATETIME, blobDateModified=mtime, wasAttributedTo=[ - Participant.unvalidated( + Participant( identifier="mouse001", schemaKey="Participant", - age=PropertyValue.unvalidated( + age=PropertyValue( schemaKey="PropertyValue", unitText="ISO-8601 duration", value="P135DT43200S", - valueReference=PropertyValue.unvalidated( + valueReference=PropertyValue( schemaKey="PropertyValue", value=AgeReferenceType.BirthReference, ), ), - sex=SexType.unvalidated(schemaKey="SexType", name="Unknown"), - species=SpeciesType.unvalidated( + sex=SexType(schemaKey="SexType", name="Unknown"), + species=SpeciesType( schemaKey="SpeciesType", identifier="http://purl.obolibrary.org/obo/NCBITaxon_10090", name="Mus musculus - House mouse", diff --git a/dandi/upload.py b/dandi/upload.py index 1fec6e75c..c5f553514 100644 --- a/dandi/upload.py +++ b/dandi/upload.py @@ -345,7 +345,7 @@ def process_path(dfile: DandiFile) -> Iterator[dict]: try: metadata = dfile.get_metadata( digest=file_etag, ignore_errors=allow_any_path - ).json_dict() + ).model_dump(mode="json", exclude_none=True) except Exception as e: raise UploadError("failed to extract metadata: %s" % str(e)) diff --git a/dandi/utils.py b/dandi/utils.py index 1fada784d..9576f8e5b 100644 --- a/dandi/utils.py +++ b/dandi/utils.py @@ -26,7 +26,7 @@ from urllib.parse import parse_qs, urlparse, urlunparse import dateutil.parser -from pydantic import AnyHttpUrl, BaseModel, Field +from pydantic import BaseModel, Field import requests import ruamel.yaml from semantic_version import Version @@ -531,7 +531,9 @@ def delayed(*args, **kwargs): class ServiceURL(BaseModel): - url: AnyHttpUrl + # Don't use pydantic.AnyHttpUrl, as that adds a trailing slash, and so URLs + # retrieved for known instances won't match the known values + url: str class ServerServices(BaseModel): @@ -557,11 +559,11 @@ def get_instance(dandi_instance_id: str | DandiInstance) -> DandiInstance: instance = dandi_instance_id dandi_id = instance.name elif dandi_instance_id.lower().startswith(("http://", "https://")): - redirector_url = dandi_instance_id - dandi_id = known_instances_rev.get(redirector_url.rstrip("/")) + redirector_url = dandi_instance_id.rstrip("/") + dandi_id = known_instances_rev.get(redirector_url) if dandi_id is not None: instance = known_instances[dandi_id] - is_api = instance.api.rstrip("/") == redirector_url.rstrip("/") + is_api = instance.api.rstrip("/") == redirector_url else: instance = None is_api = False @@ -574,7 +576,7 @@ def get_instance(dandi_instance_id: str | DandiInstance) -> DandiInstance: assert instance is not None return _get_instance(instance.api.rstrip("/"), True, 
instance, dandi_id) else: - return _get_instance(redirector_url.rstrip("/"), is_api, instance, dandi_id) + return _get_instance(redirector_url, is_api, instance, dandi_id) @lru_cache @@ -583,13 +585,13 @@ def _get_instance( ) -> DandiInstance: try: if is_api: - r = requests.get(f"{url}/info/") + r = requests.get(joinurl(url, "/info/")) else: - r = requests.get(f"{url}/server-info") + r = requests.get(joinurl(url, "/server-info")) if r.status_code == 404: - r = requests.get(f"{url}/api/info/") + r = requests.get(joinurl(url, "/api/info/")) r.raise_for_status() - server_info = ServerInfo.parse_obj(r.json()) + server_info = ServerInfo.model_validate(r.json()) except Exception as e: lgr.warning("Request to %s failed (%s)", url, str(e)) if instance is not None: @@ -615,18 +617,23 @@ def _get_instance( raise BadCliVersionError(our_version, minversion, bad_versions) api_url = server_info.services.api.url if dandi_id is None: - dandi_id = api_url.host - assert dandi_id is not None - if api_url.port is not None: - if ":" in dandi_id: - dandi_id = f"[{dandi_id}]" - dandi_id += f":{api_url.port}" + # Don't use pydantic.AnyHttpUrl, as that sets the `port` attribute even + # if it's not present in the string. + bits = urlparse(api_url) + if bits.hostname is not None: + dandi_id = bits.hostname + if bits.port is not None: + if ":" in dandi_id: + dandi_id = f"[{dandi_id}]" + dandi_id += f":{bits.port}" + else: + dandi_id = api_url return DandiInstance( name=dandi_id, - gui=str(server_info.services.webui.url) + gui=server_info.services.webui.url if server_info.services.webui is not None else None, - api=str(api_url), + api=api_url, ) @@ -860,3 +867,17 @@ def post_upload_size_check(path: Path, pre_check_size: int, erroring: bool) -> N lgr.error(msg) else: raise RuntimeError(msg) + + +def joinurl(base: str, path: str) -> str: + """ + Append a slash-separated ``path`` to a base URL ``base``. The two + components are separated by a single slash, removing any excess slashes + that would be present after naïve concatenation. + + If ``path`` is already an absolute URL, it is returned unchanged. + """ + if path.lower().startswith(("http://", "https://")): + return path + else: + return base.rstrip("/") + "/" + path.lstrip("/")
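
Reviewer note: the new `joinurl` helper centralizes the slash handling that `DandiAPIClient.get_url` previously open-coded. A brief usage sketch, assuming only the behavior defined in the final hunk above (the instance URLs are made up for illustration):

    from dandi.utils import joinurl

    # Redundant slashes between the two components collapse to a single one.
    assert (
        joinurl("https://api.example.org/api/", "/dandisets/000027/")
        == "https://api.example.org/api/dandisets/000027/"
    )

    # A `path` that is already an absolute URL is returned unchanged.
    assert (
        joinurl("https://api.example.org/api", "https://other.example.org/info/")
        == "https://other.example.org/info/"
    )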
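
Reviewer note: the renames applied throughout this patch follow the standard Pydantic v1 to v2 migration pattern. The sketch below is illustrative only; `Thing` is a hypothetical model standing in for the dandischema and dandi.dandiapi classes actually touched above, not part of either library.

    from typing import Optional

    from pydantic import AnyHttpUrl, BaseModel, Field, TypeAdapter


    class Thing(BaseModel, populate_by_name=True):
        # v1: `class Config: allow_population_by_field_name = True`
        identifier: str = Field(alias="id")
        # v2 no longer treats Optional as implying a default, hence the explicit `= None`
        label: Optional[str] = None


    thing = Thing.model_validate({"id": "abc"})           # v1: Thing.parse_obj(...)
    stub = Thing.model_construct(identifier="x")          # v1: Thing.construct(...) / dandischema's .unvalidated(...)
    data = thing.model_dump(mode="json", by_alias=True)   # v1: json.loads(thing.json(by_alias=True))
    names = Thing.model_fields.keys()                     # v1: Thing.__fields__.keys()
    url = TypeAdapter(AnyHttpUrl).validate_python("https://example.org")  # v1: parse_obj_as(AnyHttpUrl, ...)
    # Note: v2's AnyHttpUrl normalizes its input (e.g. appends a trailing slash),
    # which is why ServiceURL.url above was changed to a plain `str`.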