diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..68bc17f --- /dev/null +++ b/.gitignore @@ -0,0 +1,160 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+#.idea/ diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md new file mode 100644 index 0000000..9c5b094 --- /dev/null +++ b/README.md @@ -0,0 +1,150 @@ +## Python Open Photogrammetry Format (OPF) + +This repository provides a Python package for reading, writing and manipulating projects in the OPF format. +For more information about what OPF is and its full specification, please refer to https://www.github.com/Pix4D/opf-spec + +### Installation + +The tool can be installed using `pip` with the following command: + +```shell +pip install pyopf +``` + +This command installs the `pyopf` package and tools. + + +### Structure of the PyOPF repository + +The `pyopf` library can be found under `src/pyopf`. The library implements easy parsing and writing of OPF projects in Python. + +Below is a small example, printing the calibrated position and orientation of a camera, knowing its ID. + +```python +from pyopf.io import load + +from pyopf.resolve import resolve +from pyopf.uid64 import Uid64 + +# Path to the example project file. +project_path = "spec/examples/project.json" + +# We are going to search for the calibrated position of the camera with this ID +camera_id = Uid64(hex = "0x57282923") + +# Load the json data and resolve the project, i.e. load the project items as named attributes. +project = load(project_path) +project = resolve(project) + +# Many objects are optional in OPF. If they are missing, they are set to None. +if project.calibration is None: + print("No calibration data.") + exit(1) + +# Filter the list of calibrated cameras to find the one with the ID we are looking for. +calibrated_camera = [camera for camera in project.calibration.calibrated_cameras.cameras if camera.id == camera_id] + +# Print the pose of the camera. +print("The camera {} is calibrated at:".format(camera_id), calibrated_camera[0].position) +print("with orientation", calibrated_camera[0].orientation_deg) +``` + +The custom attributes are stored per node in the `custom_attributes` dictionary. This dictionary might be `None` if +the `Node` has no associated custom attributes. Below is an example of setting a custom attribute. 
+
+```python
+import numpy as np
+from pathlib import Path
+from pyopf.pointcloud import GlTFPointCloud
+
+pcl = GlTFPointCloud.open(Path('dense_pcl/dense_pcl.gltf'))
+
+# Generate a new point attribute as a random vector of 0s and 1s
+# The attribute must have one scalar per point
+new_attribute = np.random.randint(0, 2, size=len(pcl.nodes[0]))
+
+# The attribute must have the shape (number_of_points, 1)
+new_attribute = new_attribute.reshape((-1, 1))
+# Supported types for custom attributes are np.float32, np.uint32, np.uint16, np.uint8
+new_attribute = new_attribute.astype(np.uint32)
+
+# Set the new attribute as a custom attribute for the node
+# By default, nodes might be missing custom attributes, so the dictionary might have to be created
+if pcl.nodes[0].custom_attributes is not None:
+    pcl.nodes[0].custom_attributes['point_class'] = new_attribute
+else:
+    pcl.nodes[0].custom_attributes = {'point_class': new_attribute}
+
+pcl.write(Path('out/out.gltf'))
+```
+
+### OPF Tools
+
+We provide a few tools as command line scripts to help manipulate OPF projects in different ways.
+
+#### Merging
+
+The main use case for merging projects is to be able to process smaller sections of a project independently.
+For the merging to succeed, the sub-projects must be in the same coordinate reference system. Note that the tool doesn't support merging the content of most OPF extensions, which will therefore be dropped in the merged project.
+Two objects are considered identical if they have the same ID, even if they are in different projects. If this assumption is violated, the merging fails. For example, the same camera ID cannot be associated with two different image URIs.
+The only exceptions are the sensors, whose IDs are always regenerated and for which no attempt is made to find common and equally calibrated sensors.
+
+The point clouds are merged based on their label.
+
+Only core project items support merging:
+* camera list
+* input cameras
+* projected input cameras
+* input control points
+* projected control points
+* calibration (calibrated cameras, calibrated control points, tracks)
+* point clouds
+* constraints
+
+All extensions are dropped.
+
+The merging tool can be called using
+
+`opf_merge project_1.opf project_2.opf project_3.opf output_directory`
+
+
+#### Undistorting
+
+A tool to undistort images is provided. The undistorted images will be stored in their original location, but in an `undistort` directory. Only images taken with a perspective camera whose sensor has been calibrated will be undistorted.
+
+This tool can be used as
+
+`opf_undistort project.opf`
+
+#### Cropping
+
+We call "cropping" the operation of preserving only the region of interest of the project (as defined by the Region of
+Interest OPF extension).
+The project to be cropped *MUST* contain an item of type `ext_pix4d_region_of_interest`.
+
+During the cropping process, only the control points and the parts of the point clouds that are contained in the ROI are kept.
+Cameras which do not see any remaining points from the point clouds are discarded.
+Also, cropping uncalibrated projects is not supported.
+
+The following project items are updated during cropping:
+* Point Clouds (including tracks)
+* Cameras (input, projected, calibrated, camera list)
+* GCPs
+
+The rest of the project items are simply copied.
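+
+As a rough illustration of the filtering described above, here is a minimal, self-contained sketch using plain NumPy with a made-up axis-aligned ROI and a random visibility matrix. It is not the `opf_crop` implementation nor the pyopf API; it only shows the kind of selection the tool performs.
+
+```python
+# Illustrative sketch only: made-up axis-aligned ROI and random data,
+# not the actual opf_crop implementation.
+import numpy as np
+
+points = np.random.uniform(-10.0, 10.0, size=(1000, 3))  # hypothetical point cloud
+roi_min = np.array([-5.0, -5.0, -5.0])                    # hypothetical ROI bounds
+roi_max = np.array([5.0, 5.0, 5.0])
+
+# Keep only the points that fall inside the ROI.
+inside = np.all((points >= roi_min) & (points <= roi_max), axis=1)
+cropped_points = points[inside]
+
+# Hypothetical visibility matrix: visibility[i, j] is True if camera i sees point j.
+visibility = np.random.rand(20, points.shape[0]) > 0.99
+
+# Discard cameras that no longer see any of the remaining points.
+kept_cameras = np.flatnonzero(visibility[:, inside].any(axis=1))
+
+print(f"{inside.sum()} points kept, {kept_cameras.size} cameras kept")
+```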
+
+The cropping tool can be called using
+
+`opf_crop project_to_crop.opf output_directory`
+
+## License and citation
+
+If you use this work in your research or projects, we kindly request that you cite it as follows:
+
+The Open Photogrammetry Format Specification, Grégoire Krähenbühl, Klaus Schneider-Zapp, Bastien Dalla Piazza, Juan Hernando, Juan Palacios, Massimiliano Bellomo, Mohamed-Ghaïth Kaabi, Christoph Strecha, Pix4D, 2023, retrieved from https://pix4d.github.io/opf-spec/
+
+Copyright (c) 2023 Pix4D SA
+
+All scripts and/or code contained in this repository are licensed under the Apache License 2.0.
+
+Third party documents or tools that are used or referred to in this specification are licensed under their own terms by their respective copyright owners.
diff --git a/poetry.lock b/poetry.lock
new file mode 100644
index 0000000..a2162b7
--- /dev/null
+++ b/poetry.lock
@@ -0,0 +1,830 @@
+# This file is automatically @generated by Poetry 1.4.2 and should not be changed by hand.
+
+[[package]]
+name = "attrs"
+version = "22.2.0"
+description = "Classes Without Boilerplate"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+files = [
+    {file = "attrs-22.2.0-py3-none-any.whl", hash = "sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836"},
+    {file = "attrs-22.2.0.tar.gz", hash = "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99"},
+]
+
+[package.extras]
+cov = ["attrs[tests]", "coverage-enable-subprocess", "coverage[toml] (>=5.3)"]
+dev = ["attrs[docs,tests]"]
+docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope.interface"]
+tests = ["attrs[tests-no-zope]", "zope.interface"]
+tests-no-zope = ["cloudpickle", "cloudpickle", "hypothesis", "hypothesis", "mypy (>=0.971,<0.990)", "mypy (>=0.971,<0.990)", "pympler", "pympler", "pytest (>=4.3.0)", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-mypy-plugins", "pytest-xdist[psutil]", "pytest-xdist[psutil]"]
+
+[[package]]
+name = "cfgv"
+version = "3.3.1"
+description = "Validate configuration and produce human readable error messages."
+category = "dev"
+optional = false
+python-versions = ">=3.6.1"
+files = [
+    {file = "cfgv-3.3.1-py2.py3-none-any.whl", hash = "sha256:c6a0883f3917a037485059700b9e75da2464e6c27051014ad85ba6aaa5884426"},
+    {file = "cfgv-3.3.1.tar.gz", hash = "sha256:f5a830efb9ce7a445376bb66ec94c638a9787422f96264c98edc6bdeed8ab736"},
+]
+
+[[package]]
+name = "colorama"
+version = "0.4.6"
+description = "Cross-platform colored terminal text."
+category = "dev" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "dataclasses-json" +version = "0.5.7" +description = "Easily serialize dataclasses to and from JSON" +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "dataclasses-json-0.5.7.tar.gz", hash = "sha256:c2c11bc8214fbf709ffc369d11446ff6945254a7f09128154a7620613d8fda90"}, + {file = "dataclasses_json-0.5.7-py3-none-any.whl", hash = "sha256:bc285b5f892094c3a53d558858a88553dd6a61a11ab1a8128a0e554385dcc5dd"}, +] + +[package.dependencies] +marshmallow = ">=3.3.0,<4.0.0" +marshmallow-enum = ">=1.5.1,<2.0.0" +typing-inspect = ">=0.4.0" + +[package.extras] +dev = ["flake8", "hypothesis", "ipython", "mypy (>=0.710)", "portray", "pytest (>=6.2.3)", "simplejson", "types-dataclasses"] + +[[package]] +name = "deprecated" +version = "1.2.13" +description = "Python @deprecated decorator to deprecate old python classes, functions or methods." +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "Deprecated-1.2.13-py2.py3-none-any.whl", hash = "sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d"}, + {file = "Deprecated-1.2.13.tar.gz", hash = "sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d"}, +] + +[package.dependencies] +wrapt = ">=1.10,<2" + +[package.extras] +dev = ["PyTest", "PyTest (<5)", "PyTest-Cov", "PyTest-Cov (<2.6)", "bump2version (<1)", "configparser (<5)", "importlib-metadata (<3)", "importlib-resources (<4)", "sphinx (<2)", "sphinxcontrib-websupport (<2)", "tox", "zipp (<2)"] + +[[package]] +name = "distlib" +version = "0.3.6" +description = "Distribution utilities" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "distlib-0.3.6-py2.py3-none-any.whl", hash = "sha256:f35c4b692542ca110de7ef0bea44d73981caeb34ca0b9b6b2e6d7790dda8f80e"}, + {file = "distlib-0.3.6.tar.gz", hash = "sha256:14bad2d9b04d3a36127ac97f30b12a19268f211063d8f8ee4f47108896e11b46"}, +] + +[[package]] +name = "exceptiongroup" +version = "1.1.1" +description = "Backport of PEP 654 (exception groups)" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.1.1-py3-none-any.whl", hash = "sha256:232c37c63e4f682982c8b6459f33a8981039e5fb8756b2074364e5055c498c9e"}, + {file = "exceptiongroup-1.1.1.tar.gz", hash = "sha256:d484c3090ba2889ae2928419117447a14daf3c1231d5e30d0aae34f354f01785"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "filelock" +version = "3.10.0" +description = "A platform independent file lock." 
+category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "filelock-3.10.0-py3-none-any.whl", hash = "sha256:e90b34656470756edf8b19656785c5fea73afa1953f3e1b0d645cef11cab3182"}, + {file = "filelock-3.10.0.tar.gz", hash = "sha256:3199fd0d3faea8b911be52b663dfccceb84c95949dd13179aa21436d1a79c4ce"}, +] + +[package.extras] +docs = ["furo (>=2022.12.7)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.2.1)", "pytest (>=7.2.2)", "pytest-cov (>=4)", "pytest-timeout (>=2.1)"] + +[[package]] +name = "identify" +version = "2.5.20" +description = "File identification library for Python" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "identify-2.5.20-py2.py3-none-any.whl", hash = "sha256:5dfef8a745ca4f2c95f27e9db74cb4c8b6d9916383988e8791f3595868f78a33"}, + {file = "identify-2.5.20.tar.gz", hash = "sha256:c8b288552bc5f05a08aff09af2f58e6976bf8ac87beb38498a0e3d98ba64eb18"}, +] + +[package.extras] +license = ["ukkonen"] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "marshmallow" +version = "3.19.0" +description = "A lightweight library for converting complex datatypes to and from native Python datatypes." +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "marshmallow-3.19.0-py3-none-any.whl", hash = "sha256:93f0958568da045b0021ec6aeb7ac37c81bfcccbb9a0e7ed8559885070b3a19b"}, + {file = "marshmallow-3.19.0.tar.gz", hash = "sha256:90032c0fd650ce94b6ec6dc8dfeb0e3ff50c144586462c389b81a07205bedb78"}, +] + +[package.dependencies] +packaging = ">=17.0" + +[package.extras] +dev = ["flake8 (==5.0.4)", "flake8-bugbear (==22.10.25)", "mypy (==0.990)", "pre-commit (>=2.4,<3.0)", "pytest", "pytz", "simplejson", "tox"] +docs = ["alabaster (==0.7.12)", "autodocsumm (==0.2.9)", "sphinx (==5.3.0)", "sphinx-issues (==3.0.1)", "sphinx-version-warning (==1.1.2)"] +lint = ["flake8 (==5.0.4)", "flake8-bugbear (==22.10.25)", "mypy (==0.990)", "pre-commit (>=2.4,<3.0)"] +tests = ["pytest", "pytz", "simplejson"] + +[[package]] +name = "marshmallow-enum" +version = "1.5.1" +description = "Enum field for Marshmallow" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "marshmallow-enum-1.5.1.tar.gz", hash = "sha256:38e697e11f45a8e64b4a1e664000897c659b60aa57bfa18d44e226a9920b6e58"}, + {file = "marshmallow_enum-1.5.1-py2.py3-none-any.whl", hash = "sha256:57161ab3dbfde4f57adeb12090f39592e992b9c86d206d02f6bd03ebec60f072"}, +] + +[package.dependencies] +marshmallow = ">=2.0.0" + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." 
+category = "main" +optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "nodeenv" +version = "1.7.0" +description = "Node.js virtual environment builder" +category = "dev" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*" +files = [ + {file = "nodeenv-1.7.0-py2.py3-none-any.whl", hash = "sha256:27083a7b96a25f2f5e1d8cb4b6317ee8aeda3bdd121394e5ac54e498028a042e"}, + {file = "nodeenv-1.7.0.tar.gz", hash = "sha256:e0e7f7dfb85fc5394c6fe1e8fa98131a2473e04311a45afb6508f7cf1836fa2b"}, +] + +[package.dependencies] +setuptools = "*" + +[[package]] +name = "numpy" +version = "1.24.2" +description = "Fundamental package for array computing in Python" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "numpy-1.24.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eef70b4fc1e872ebddc38cddacc87c19a3709c0e3e5d20bf3954c147b1dd941d"}, + {file = "numpy-1.24.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8d2859428712785e8a8b7d2b3ef0a1d1565892367b32f915c4a4df44d0e64f5"}, + {file = "numpy-1.24.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6524630f71631be2dabe0c541e7675db82651eb998496bbe16bc4f77f0772253"}, + {file = "numpy-1.24.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a51725a815a6188c662fb66fb32077709a9ca38053f0274640293a14fdd22978"}, + {file = "numpy-1.24.2-cp310-cp310-win32.whl", hash = "sha256:2620e8592136e073bd12ee4536149380695fbe9ebeae845b81237f986479ffc9"}, + {file = "numpy-1.24.2-cp310-cp310-win_amd64.whl", hash = "sha256:97cf27e51fa078078c649a51d7ade3c92d9e709ba2bfb97493007103c741f1d0"}, + {file = "numpy-1.24.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7de8fdde0003f4294655aa5d5f0a89c26b9f22c0a58790c38fae1ed392d44a5a"}, + {file = "numpy-1.24.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4173bde9fa2a005c2c6e2ea8ac1618e2ed2c1c6ec8a7657237854d42094123a0"}, + {file = "numpy-1.24.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cecaed30dc14123020f77b03601559fff3e6cd0c048f8b5289f4eeabb0eb281"}, + {file = "numpy-1.24.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a23f8440561a633204a67fb44617ce2a299beecf3295f0d13c495518908e910"}, + {file = "numpy-1.24.2-cp311-cp311-win32.whl", hash = "sha256:e428c4fbfa085f947b536706a2fc349245d7baa8334f0c5723c56a10595f9b95"}, + {file = "numpy-1.24.2-cp311-cp311-win_amd64.whl", hash = "sha256:557d42778a6869c2162deb40ad82612645e21d79e11c1dc62c6e82a2220ffb04"}, + {file = "numpy-1.24.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d0a2db9d20117bf523dde15858398e7c0858aadca7c0f088ac0d6edd360e9ad2"}, + {file = "numpy-1.24.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c72a6b2f4af1adfe193f7beb91ddf708ff867a3f977ef2ec53c0ffb8283ab9f5"}, + {file = "numpy-1.24.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c29e6bd0ec49a44d7690ecb623a8eac5ab8a923bce0bea6293953992edf3a76a"}, + {file = "numpy-1.24.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2eabd64ddb96a1239791da78fa5f4e1693ae2dadc82a76bc76a14cbb2b966e96"}, + {file = "numpy-1.24.2-cp38-cp38-win32.whl", hash = 
"sha256:e3ab5d32784e843fc0dd3ab6dcafc67ef806e6b6828dc6af2f689be0eb4d781d"}, + {file = "numpy-1.24.2-cp38-cp38-win_amd64.whl", hash = "sha256:76807b4063f0002c8532cfeac47a3068a69561e9c8715efdad3c642eb27c0756"}, + {file = "numpy-1.24.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4199e7cfc307a778f72d293372736223e39ec9ac096ff0a2e64853b866a8e18a"}, + {file = "numpy-1.24.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:adbdce121896fd3a17a77ab0b0b5eedf05a9834a18699db6829a64e1dfccca7f"}, + {file = "numpy-1.24.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:889b2cc88b837d86eda1b17008ebeb679d82875022200c6e8e4ce6cf549b7acb"}, + {file = "numpy-1.24.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f64bb98ac59b3ea3bf74b02f13836eb2e24e48e0ab0145bbda646295769bd780"}, + {file = "numpy-1.24.2-cp39-cp39-win32.whl", hash = "sha256:63e45511ee4d9d976637d11e6c9864eae50e12dc9598f531c035265991910468"}, + {file = "numpy-1.24.2-cp39-cp39-win_amd64.whl", hash = "sha256:a77d3e1163a7770164404607b7ba3967fb49b24782a6ef85d9b5f54126cc39e5"}, + {file = "numpy-1.24.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:92011118955724465fb6853def593cf397b4a1367495e0b59a7e69d40c4eb71d"}, + {file = "numpy-1.24.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9006288bcf4895917d02583cf3411f98631275bc67cce355a7f39f8c14338fa"}, + {file = "numpy-1.24.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:150947adbdfeceec4e5926d956a06865c1c690f2fd902efede4ca6fe2e657c3f"}, + {file = "numpy-1.24.2.tar.gz", hash = "sha256:003a9f530e880cb2cd177cba1af7220b9aa42def9c4afc2a2fc3ee6be7eb2b22"}, +] + +[[package]] +name = "packaging" +version = "23.0" +description = "Core utilities for Python packages" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "packaging-23.0-py3-none-any.whl", hash = "sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2"}, + {file = "packaging-23.0.tar.gz", hash = "sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97"}, +] + +[[package]] +name = "pillow" +version = "9.5.0" +description = "Python Imaging Library (Fork)" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "Pillow-9.5.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:ace6ca218308447b9077c14ea4ef381ba0b67ee78d64046b3f19cf4e1139ad16"}, + {file = "Pillow-9.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d3d403753c9d5adc04d4694d35cf0391f0f3d57c8e0030aac09d7678fa8030aa"}, + {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ba1b81ee69573fe7124881762bb4cd2e4b6ed9dd28c9c60a632902fe8db8b38"}, + {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe7e1c262d3392afcf5071df9afa574544f28eac825284596ac6db56e6d11062"}, + {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f36397bf3f7d7c6a3abdea815ecf6fd14e7fcd4418ab24bae01008d8d8ca15e"}, + {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:252a03f1bdddce077eff2354c3861bf437c892fb1832f75ce813ee94347aa9b5"}, + {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:85ec677246533e27770b0de5cf0f9d6e4ec0c212a1f89dfc941b64b21226009d"}, + {file = "Pillow-9.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b416f03d37d27290cb93597335a2f85ed446731200705b22bb927405320de903"}, + {file = 
"Pillow-9.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1781a624c229cb35a2ac31cc4a77e28cafc8900733a864870c49bfeedacd106a"}, + {file = "Pillow-9.5.0-cp310-cp310-win32.whl", hash = "sha256:8507eda3cd0608a1f94f58c64817e83ec12fa93a9436938b191b80d9e4c0fc44"}, + {file = "Pillow-9.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:d3c6b54e304c60c4181da1c9dadf83e4a54fd266a99c70ba646a9baa626819eb"}, + {file = "Pillow-9.5.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:7ec6f6ce99dab90b52da21cf0dc519e21095e332ff3b399a357c187b1a5eee32"}, + {file = "Pillow-9.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:560737e70cb9c6255d6dcba3de6578a9e2ec4b573659943a5e7e4af13f298f5c"}, + {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96e88745a55b88a7c64fa49bceff363a1a27d9a64e04019c2281049444a571e3"}, + {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d9c206c29b46cfd343ea7cdfe1232443072bbb270d6a46f59c259460db76779a"}, + {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cfcc2c53c06f2ccb8976fb5c71d448bdd0a07d26d8e07e321c103416444c7ad1"}, + {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:a0f9bb6c80e6efcde93ffc51256d5cfb2155ff8f78292f074f60f9e70b942d99"}, + {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:8d935f924bbab8f0a9a28404422da8af4904e36d5c33fc6f677e4c4485515625"}, + {file = "Pillow-9.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fed1e1cf6a42577953abbe8e6cf2fe2f566daebde7c34724ec8803c4c0cda579"}, + {file = "Pillow-9.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c1170d6b195555644f0616fd6ed929dfcf6333b8675fcca044ae5ab110ded296"}, + {file = "Pillow-9.5.0-cp311-cp311-win32.whl", hash = "sha256:54f7102ad31a3de5666827526e248c3530b3a33539dbda27c6843d19d72644ec"}, + {file = "Pillow-9.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:cfa4561277f677ecf651e2b22dc43e8f5368b74a25a8f7d1d4a3a243e573f2d4"}, + {file = "Pillow-9.5.0-cp311-cp311-win_arm64.whl", hash = "sha256:965e4a05ef364e7b973dd17fc765f42233415974d773e82144c9bbaaaea5d089"}, + {file = "Pillow-9.5.0-cp312-cp312-win32.whl", hash = "sha256:22baf0c3cf0c7f26e82d6e1adf118027afb325e703922c8dfc1d5d0156bb2eeb"}, + {file = "Pillow-9.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:432b975c009cf649420615388561c0ce7cc31ce9b2e374db659ee4f7d57a1f8b"}, + {file = "Pillow-9.5.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:5d4ebf8e1db4441a55c509c4baa7a0587a0210f7cd25fcfe74dbbce7a4bd1906"}, + {file = "Pillow-9.5.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:375f6e5ee9620a271acb6820b3d1e94ffa8e741c0601db4c0c4d3cb0a9c224bf"}, + {file = "Pillow-9.5.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99eb6cafb6ba90e436684e08dad8be1637efb71c4f2180ee6b8f940739406e78"}, + {file = "Pillow-9.5.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dfaaf10b6172697b9bceb9a3bd7b951819d1ca339a5ef294d1f1ac6d7f63270"}, + {file = "Pillow-9.5.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:763782b2e03e45e2c77d7779875f4432e25121ef002a41829d8868700d119392"}, + {file = "Pillow-9.5.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:35f6e77122a0c0762268216315bf239cf52b88865bba522999dc38f1c52b9b47"}, + {file = "Pillow-9.5.0-cp37-cp37m-win32.whl", hash = "sha256:aca1c196f407ec7cf04dcbb15d19a43c507a81f7ffc45b690899d6a76ac9fda7"}, + {file = 
"Pillow-9.5.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322724c0032af6692456cd6ed554bb85f8149214d97398bb80613b04e33769f6"}, + {file = "Pillow-9.5.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:a0aa9417994d91301056f3d0038af1199eb7adc86e646a36b9e050b06f526597"}, + {file = "Pillow-9.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f8286396b351785801a976b1e85ea88e937712ee2c3ac653710a4a57a8da5d9c"}, + {file = "Pillow-9.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c830a02caeb789633863b466b9de10c015bded434deb3ec87c768e53752ad22a"}, + {file = "Pillow-9.5.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fbd359831c1657d69bb81f0db962905ee05e5e9451913b18b831febfe0519082"}, + {file = "Pillow-9.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8fc330c3370a81bbf3f88557097d1ea26cd8b019d6433aa59f71195f5ddebbf"}, + {file = "Pillow-9.5.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:7002d0797a3e4193c7cdee3198d7c14f92c0836d6b4a3f3046a64bd1ce8df2bf"}, + {file = "Pillow-9.5.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:229e2c79c00e85989a34b5981a2b67aa079fd08c903f0aaead522a1d68d79e51"}, + {file = "Pillow-9.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9adf58f5d64e474bed00d69bcd86ec4bcaa4123bfa70a65ce72e424bfb88ed96"}, + {file = "Pillow-9.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:662da1f3f89a302cc22faa9f14a262c2e3951f9dbc9617609a47521c69dd9f8f"}, + {file = "Pillow-9.5.0-cp38-cp38-win32.whl", hash = "sha256:6608ff3bf781eee0cd14d0901a2b9cc3d3834516532e3bd673a0a204dc8615fc"}, + {file = "Pillow-9.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:e49eb4e95ff6fd7c0c402508894b1ef0e01b99a44320ba7d8ecbabefddcc5569"}, + {file = "Pillow-9.5.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:482877592e927fd263028c105b36272398e3e1be3269efda09f6ba21fd83ec66"}, + {file = "Pillow-9.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3ded42b9ad70e5f1754fb7c2e2d6465a9c842e41d178f262e08b8c85ed8a1d8e"}, + {file = "Pillow-9.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c446d2245ba29820d405315083d55299a796695d747efceb5717a8b450324115"}, + {file = "Pillow-9.5.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8aca1152d93dcc27dc55395604dcfc55bed5f25ef4c98716a928bacba90d33a3"}, + {file = "Pillow-9.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:608488bdcbdb4ba7837461442b90ea6f3079397ddc968c31265c1e056964f1ef"}, + {file = "Pillow-9.5.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:60037a8db8750e474af7ffc9faa9b5859e6c6d0a50e55c45576bf28be7419705"}, + {file = "Pillow-9.5.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:07999f5834bdc404c442146942a2ecadd1cb6292f5229f4ed3b31e0a108746b1"}, + {file = "Pillow-9.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a127ae76092974abfbfa38ca2d12cbeddcdeac0fb71f9627cc1135bedaf9d51a"}, + {file = "Pillow-9.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:489f8389261e5ed43ac8ff7b453162af39c3e8abd730af8363587ba64bb2e865"}, + {file = "Pillow-9.5.0-cp39-cp39-win32.whl", hash = "sha256:9b1af95c3a967bf1da94f253e56b6286b50af23392a886720f563c547e48e964"}, + {file = "Pillow-9.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:77165c4a5e7d5a284f10a6efaa39a0ae8ba839da344f20b111d62cc932fa4e5d"}, + {file = "Pillow-9.5.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:833b86a98e0ede388fa29363159c9b1a294b0905b5128baf01db683672f230f5"}, + {file = 
"Pillow-9.5.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aaf305d6d40bd9632198c766fb64f0c1a83ca5b667f16c1e79e1661ab5060140"}, + {file = "Pillow-9.5.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0852ddb76d85f127c135b6dd1f0bb88dbb9ee990d2cd9aa9e28526c93e794fba"}, + {file = "Pillow-9.5.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:91ec6fe47b5eb5a9968c79ad9ed78c342b1f97a091677ba0e012701add857829"}, + {file = "Pillow-9.5.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:cb841572862f629b99725ebaec3287fc6d275be9b14443ea746c1dd325053cbd"}, + {file = "Pillow-9.5.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:c380b27d041209b849ed246b111b7c166ba36d7933ec6e41175fd15ab9eb1572"}, + {file = "Pillow-9.5.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c9af5a3b406a50e313467e3565fc99929717f780164fe6fbb7704edba0cebbe"}, + {file = "Pillow-9.5.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5671583eab84af046a397d6d0ba25343c00cd50bce03787948e0fff01d4fd9b1"}, + {file = "Pillow-9.5.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:84a6f19ce086c1bf894644b43cd129702f781ba5751ca8572f08aa40ef0ab7b7"}, + {file = "Pillow-9.5.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1e7723bd90ef94eda669a3c2c19d549874dd5badaeefabefd26053304abe5799"}, + {file = "Pillow-9.5.0.tar.gz", hash = "sha256:bf548479d336726d7a0eceb6e767e179fbde37833ae42794602631a070d630f1"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] + +[[package]] +name = "platformdirs" +version = "3.1.1" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "platformdirs-3.1.1-py3-none-any.whl", hash = "sha256:e5986afb596e4bb5bde29a79ac9061aa955b94fca2399b7aaac4090860920dd8"}, + {file = "platformdirs-3.1.1.tar.gz", hash = "sha256:024996549ee88ec1a9aa99ff7f8fc819bb59e2c3477b410d90a16d32d6e707aa"}, +] + +[package.extras] +docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"] + +[[package]] +name = "pluggy" +version = "1.0.0" +description = "plugin and hook calling mechanisms for python" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, + {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "pre-commit" +version = "3.1.1" +description = "A framework for managing and maintaining multi-language pre-commit hooks." 
+category = "dev" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pre_commit-3.1.1-py2.py3-none-any.whl", hash = "sha256:b80254e60668e1dd1f5c03a1c9e0413941d61f568a57d745add265945f65bfe8"}, + {file = "pre_commit-3.1.1.tar.gz", hash = "sha256:d63e6537f9252d99f65755ae5b79c989b462d511ebbc481b561db6a297e1e865"}, +] + +[package.dependencies] +cfgv = ">=2.0.0" +identify = ">=1.0.0" +nodeenv = ">=0.11.1" +pyyaml = ">=5.1" +virtualenv = ">=20.10.0" + +[[package]] +name = "pygltflib" +version = "1.15.5" +description = "Python library for reading, writing and managing 3D objects in the Khronos Group gltf and gltf2 formats." +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pygltflib-1.15.5.tar.gz", hash = "sha256:e02f488f48c29e1f2d3736eaa4103b801a457fa7172c8c629d638af50e312d85"}, +] + +[package.dependencies] +dataclasses-json = ">=0.0.25" +deprecated = "*" + +[[package]] +name = "pytest" +version = "7.2.2" +description = "pytest: simple powerful testing with Python" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-7.2.2-py3-none-any.whl", hash = "sha256:130328f552dcfac0b1cec75c12e3f005619dc5f874f0a06e8ff7263f0ee6225e"}, + {file = "pytest-7.2.2.tar.gz", hash = "sha256:c99ab0c73aceb050f68929bc93af19ab6db0558791c6a0715723abe9d0ade9d4"}, +] + +[package.dependencies] +attrs = ">=19.2.0" +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<2.0" +tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} + +[package.extras] +testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "xmlschema"] + +[[package]] +name = "python-dateutil" +version = "2.8.2" +description = "Extensions to the standard Python datetime module" +category = "main" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "pyyaml" +version = "6.0" +description = "YAML parser and emitter for Python" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, + {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"}, + {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"}, + {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"}, + {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, + {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, + {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = 
"sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, + {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"}, + {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"}, + {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"}, + {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"}, + {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"}, + {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"}, + {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"}, + {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"}, + {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"}, + {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"}, + {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"}, + {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"}, + {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"}, + {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"}, + {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"}, + {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"}, + {file = 
"PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"}, + {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"}, + {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"}, + {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"}, + {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"}, + {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"}, + {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"}, + {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"}, + {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"}, + {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"}, + {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, +] + +[[package]] +name = "setuptools" +version = "67.6.0" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "setuptools-67.6.0-py3-none-any.whl", hash = "sha256:b78aaa36f6b90a074c1fa651168723acbf45d14cb1196b6f02c0fd07f17623b2"}, + {file = "setuptools-67.6.0.tar.gz", hash = "sha256:2ee892cd5f29f3373097f5a814697e397cf3ce313616df0af11231e2ad118077"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] + +[[package]] +name = "shapely" +version = "2.0.1" +description = "Manipulation and analysis of geometric objects" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "shapely-2.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b06d031bc64149e340448fea25eee01360a58936c89985cf584134171e05863f"}, + {file = "shapely-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:9a6ac34c16f4d5d3c174c76c9d7614ec8fe735f8f82b6cc97a46b54f386a86bf"}, + {file = "shapely-2.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:865bc3d7cc0ea63189d11a0b1120d1307ed7a64720a8bfa5be2fde5fc6d0d33f"}, + {file = "shapely-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45b4833235b90bc87ee26c6537438fa77559d994d2d3be5190dd2e54d31b2820"}, + {file = "shapely-2.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce88ec79df55430e37178a191ad8df45cae90b0f6972d46d867bf6ebbb58cc4d"}, + {file = "shapely-2.0.1-cp310-cp310-win32.whl", hash = "sha256:01224899ff692a62929ef1a3f5fe389043e262698a708ab7569f43a99a48ae82"}, + {file = "shapely-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:da71de5bf552d83dcc21b78cc0020e86f8d0feea43e202110973987ffa781c21"}, + {file = "shapely-2.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:502e0a607f1dcc6dee0125aeee886379be5242c854500ea5fd2e7ac076b9ce6d"}, + {file = "shapely-2.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7d3bbeefd8a6a1a1017265d2d36f8ff2d79d0162d8c141aa0d37a87063525656"}, + {file = "shapely-2.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f470a130d6ddb05b810fc1776d918659407f8d025b7f56d2742a596b6dffa6c7"}, + {file = "shapely-2.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4641325e065fd3e07d55677849c9ddfd0cf3ee98f96475126942e746d55b17c8"}, + {file = "shapely-2.0.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:90cfa4144ff189a3c3de62e2f3669283c98fb760cfa2e82ff70df40f11cadb39"}, + {file = "shapely-2.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70a18fc7d6418e5aea76ac55dce33f98e75bd413c6eb39cfed6a1ba36469d7d4"}, + {file = "shapely-2.0.1-cp311-cp311-win32.whl", hash = "sha256:09d6c7763b1bee0d0a2b84bb32a4c25c6359ad1ac582a62d8b211e89de986154"}, + {file = "shapely-2.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:d8f55f355be7821dade839df785a49dc9f16d1af363134d07eb11e9207e0b189"}, + {file = "shapely-2.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:83a8ec0ee0192b6e3feee9f6a499d1377e9c295af74d7f81ecba5a42a6b195b7"}, + {file = "shapely-2.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a529218e72a3dbdc83676198e610485fdfa31178f4be5b519a8ae12ea688db14"}, + {file = "shapely-2.0.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:91575d97fd67391b85686573d758896ed2fc7476321c9d2e2b0c398b628b961c"}, + {file = "shapely-2.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8b0d834b11be97d5ab2b4dceada20ae8e07bcccbc0f55d71df6729965f406ad"}, + {file = "shapely-2.0.1-cp37-cp37m-win32.whl", hash = "sha256:b4f0711cc83734c6fad94fc8d4ec30f3d52c1787b17d9dca261dc841d4731c64"}, + {file = "shapely-2.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:05c51a29336e604c084fb43ae5dbbfa2c0ef9bd6fedeae0a0d02c7b57a56ba46"}, + {file = "shapely-2.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b519cf3726ddb6c67f6a951d1bb1d29691111eaa67ea19ddca4d454fbe35949c"}, + {file = "shapely-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:193a398d81c97a62fc3634a1a33798a58fd1dcf4aead254d080b273efbb7e3ff"}, + {file = "shapely-2.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e55698e0ed95a70fe9ff9a23c763acfe0bf335b02df12142f74e4543095e9a9b"}, + {file = "shapely-2.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f32a748703e7bf6e92dfa3d2936b2fbfe76f8ce5f756e24f49ef72d17d26ad02"}, + 
{file = "shapely-2.0.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1a34a23d6266ca162499e4a22b79159dc0052f4973d16f16f990baa4d29e58b6"}, + {file = "shapely-2.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d173d24e85e51510e658fb108513d5bc11e3fd2820db6b1bd0522266ddd11f51"}, + {file = "shapely-2.0.1-cp38-cp38-win32.whl", hash = "sha256:3cb256ae0c01b17f7bc68ee2ffdd45aebf42af8992484ea55c29a6151abe4386"}, + {file = "shapely-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:c7eed1fb3008a8a4a56425334b7eb82651a51f9e9a9c2f72844a2fb394f38a6c"}, + {file = "shapely-2.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:ac1dfc397475d1de485e76de0c3c91cc9d79bd39012a84bb0f5e8a199fc17bef"}, + {file = "shapely-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:33403b8896e1d98aaa3a52110d828b18985d740cc9f34f198922018b1e0f8afe"}, + {file = "shapely-2.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2569a4b91caeef54dd5ae9091ae6f63526d8ca0b376b5bb9fd1a3195d047d7d4"}, + {file = "shapely-2.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a70a614791ff65f5e283feed747e1cc3d9e6c6ba91556e640636bbb0a1e32a71"}, + {file = "shapely-2.0.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c43755d2c46b75a7b74ac6226d2cc9fa2a76c3263c5ae70c195c6fb4e7b08e79"}, + {file = "shapely-2.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad81f292fffbd568ae71828e6c387da7eb5384a79db9b4fde14dd9fdeffca9a"}, + {file = "shapely-2.0.1-cp39-cp39-win32.whl", hash = "sha256:b50c401b64883e61556a90b89948297f1714dbac29243d17ed9284a47e6dd731"}, + {file = "shapely-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:bca57b683e3d94d0919e2f31e4d70fdfbb7059650ef1b431d9f4e045690edcd5"}, + {file = "shapely-2.0.1.tar.gz", hash = "sha256:66a6b1a3e72ece97fc85536a281476f9b7794de2e646ca8a4517e2e3c1446893"}, +] + +[package.dependencies] +numpy = ">=1.14" + +[package.extras] +docs = ["matplotlib", "numpydoc (>=1.1.0,<1.2.0)", "sphinx", "sphinx-book-theme", "sphinx-remove-toctrees"] +test = ["pytest", "pytest-cov"] + +[[package]] +name = "simplejson" +version = "3.18.4" +description = "Simple, fast, extensible JSON encoder/decoder for Python" +category = "main" +optional = false +python-versions = ">=2.5, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "simplejson-3.18.4-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:8f381747c2edebe3c750a571e55103bfcc33b2707a9b91ae033ab9ba718d976a"}, + {file = "simplejson-3.18.4-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:094275b1b8f003afce1167c8a674cd1ee2fd48c566632dac5d149901d5012ff8"}, + {file = "simplejson-3.18.4-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:676e8c182f8079851f12ae1cee2fcebe04def2da2a5703a9d747ab125af47732"}, + {file = "simplejson-3.18.4-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:4b5df4ee48403885046c6f4fd8adc84c4ac0adec69482f22a17bd4ba52876341"}, + {file = "simplejson-3.18.4-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:edb334cab35dcd90eb563fdacb085f10e5dd0b1acb57fa43f8933308b42a8f88"}, + {file = "simplejson-3.18.4-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:b6c6cfc492710d8f0303705fa1ff7bb3d6a145f523384e45a6f3b13ada37021f"}, + {file = "simplejson-3.18.4-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ced906b172bfad62736a27cfafcb6e24bc9938533b0529ff8150f7926fe35b54"}, + {file = "simplejson-3.18.4-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:7701a289d45fdfeb37f1d15cf638801cea439df667a613379443772a86e82936"}, + {file = 
"simplejson-3.18.4-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:e2f87a483c4ab0bb2a9adc9ca09173e7f7cf3696e4fa67bd45a6b33181e57921"}, + {file = "simplejson-3.18.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:c0444423129df448788edc66a129bc7560ad7d6a661d74f0900959c0b44349a1"}, + {file = "simplejson-3.18.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29a86bc9c8a913a4e0ffab85c563a7505cdf4bd13fba05342f8314facc0b7586"}, + {file = "simplejson-3.18.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e2fa1ee5ca34ab2ecfbe3f7a7e952a1ecaebb5b4818f002b5b146324912ac3d5"}, + {file = "simplejson-3.18.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b17026f3f349a6e87818cd3531e3bbb5cc78a6f4b2b6718f574a8e0512d71e08"}, + {file = "simplejson-3.18.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a255d30cda6334ba780eb40a56e8134efd3453948b995d3966e45212e34bf018"}, + {file = "simplejson-3.18.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d9f0dfde448611f4f818da05f9b544a78f29355dc39151b0dad8e7c65c513e4f"}, + {file = "simplejson-3.18.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1085cadec0f7e76377951d7a87744628c90ac6cc634fc97eecce0c4d41ec563"}, + {file = "simplejson-3.18.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f15f56b3119fb71fa57eb4613bcd87eb7df6c2f3547de7d341853d3e50cef97e"}, + {file = "simplejson-3.18.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:695da62e494e4689ab78fae173a78390a175b6a5ccc4292277ce0f8dba3945d5"}, + {file = "simplejson-3.18.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:097e48686e49026836ef384c7c10ca670acc023cb16a976a689c2eb6c1852df4"}, + {file = "simplejson-3.18.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a56005332d70b8d02d476d4a85818b27b01e51dac1a21d5c1a1d8a5df2efb4a6"}, + {file = "simplejson-3.18.4-cp310-cp310-win32.whl", hash = "sha256:3d549efc7e8f9a180c59462b124991b690ff25c235d5cf495c3246c66a7679cd"}, + {file = "simplejson-3.18.4-cp310-cp310-win_amd64.whl", hash = "sha256:bd694c465cc61fa8e599355e535b6eb561279834d9883aeef08d0e86c44c300c"}, + {file = "simplejson-3.18.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad37f25fd8dfbed80815c3281b82a165be2a74e663856b9a50099d18789987bc"}, + {file = "simplejson-3.18.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2362c66d2c633925d90f2f177f05e0570d320d986130d34dff9ad6edbf7be8ac"}, + {file = "simplejson-3.18.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30e381471158290ccb79bd31e7bbda4c8f2cf7e1a5f6b557c1b97d6036ccd05b"}, + {file = "simplejson-3.18.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d45ed9452a42064805143480397b586ea2ea322f4b8b69034c51181e7f38342"}, + {file = "simplejson-3.18.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0dcc54e7cfbd9674ec4ca181e26eaa5b038446601faeaa6c83d146ddef2f2652"}, + {file = "simplejson-3.18.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:05a668d4a93816fb8a644e90e7987aa3beeb9d2112ca50a474d41e6acb5bb88a"}, + {file = "simplejson-3.18.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da6dc0cb00ef1e1a8daf285074ca8b2bb89591170c42ceab0c37bcdb9adc802c"}, + {file = "simplejson-3.18.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:f31e126204ec38f92dee119af87cf881044ef7dea6f7477ef774ed3d84199c24"}, + {file = "simplejson-3.18.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:fb0f8b35c11fd8e4b924f974d331b20fa54555282451db7f2a3b24bd2d33cc11"}, + {file = "simplejson-3.18.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:2d1b47f768e1f4c1c8a9457effabed735939401e85c0ddcdf68444c88a9242e6"}, + {file = "simplejson-3.18.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6d65ea4582b47d77e9094c22eb0aeded0ebd96c1df86e988870b40c6514c6e21"}, + {file = "simplejson-3.18.4-cp311-cp311-win32.whl", hash = "sha256:32de1672f91a789cc9e1c36c406b2d75457a242d64e9e73a70b9b814ef00095e"}, + {file = "simplejson-3.18.4-cp311-cp311-win_amd64.whl", hash = "sha256:c37b092d29741096c4723f48924a80b1d3de62ca1de254ce88178fa083dd520c"}, + {file = "simplejson-3.18.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:706a7fc81ceeb321a1040d008b134056012188f95a5c31ad94fb03153b35cc84"}, + {file = "simplejson-3.18.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab64f087c5863ac621b42e227e5a43bd9b28de581afe7be12ad96562b9be8203"}, + {file = "simplejson-3.18.4-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7f27a079cb009ba569983061a50a9270b7e1d35f81e4eeaf0e26f8924027e550"}, + {file = "simplejson-3.18.4-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93ba80fbf959b5852554f23201a5f4b30885930c303546ffa883859a435ea3cf"}, + {file = "simplejson-3.18.4-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0cdb5069870f7d26a34e5adc30672d0a7b26e652720530a023bb3a8d8a42e37f"}, + {file = "simplejson-3.18.4-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:340b7d085b4a5063aacb8664b1250e4a7426c16e1cc80705c548a229153af147"}, + {file = "simplejson-3.18.4-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:b9893852c559998f667e6434d2c2474518d4cdfd1b9cec8e57b3c9d577ba55c1"}, + {file = "simplejson-3.18.4-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:efae49d0148ec68b6e012f1b9e19bd530f4dced378ba919e3e906ae2b829cc31"}, + {file = "simplejson-3.18.4-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:a89d7fe994b115f0a792e6673f387af3db812a1760d594abad51e0ea11d3e470"}, + {file = "simplejson-3.18.4-cp36-cp36m-win32.whl", hash = "sha256:44058bea97429cfa0d6fb1d8eb0736a77022f34a326d5bc64fd6fed8d9304571"}, + {file = "simplejson-3.18.4-cp36-cp36m-win_amd64.whl", hash = "sha256:f85d87986ca375b8305b5c4f166783b8db383a6469e8b99b8dba22878388f234"}, + {file = "simplejson-3.18.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a3bba99178f1b25878752a8bc6da2f93fbae754ebd4914d2ac4b869b9fb24102"}, + {file = "simplejson-3.18.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5f67bffa6fc68e391b2250e1feb43d534ded64a7b918eb89cf7e3e679759d94"}, + {file = "simplejson-3.18.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a8ac155e3fd3b54a63040df024e57e62c130b15a2fc66eff3c2a946f42beed52"}, + {file = "simplejson-3.18.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:682b202f56d9d9e1bb22eaca3e37321002223fd5ddef7189b9233e3c14079917"}, + {file = "simplejson-3.18.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3dbfaa79b1c0efdb768392a19110f1aff793f3e8d43f57e292f46734b8affb45"}, + {file = 
"simplejson-3.18.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7339bd6203351555c1e728acd601ba95ebce0f6041ebdb386e025f00af3f1769"}, + {file = "simplejson-3.18.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:544e5607142d66a469ecf78a3154ec0f915834dc3b8cfdb2677a78ca58319ad6"}, + {file = "simplejson-3.18.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:56d36f47bc7c7684504f0f18feb161a0b1162546b3622e45aa6155f8285180ac"}, + {file = "simplejson-3.18.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b482d1fdd8f860e743c7de8cd6dfe54fb9fe8cd6ccba29e2966912ac89e17b2f"}, + {file = "simplejson-3.18.4-cp37-cp37m-win32.whl", hash = "sha256:313dfd911723dc3022fed7050a7b315d5d0681cd56eee08e44e2cbd39fd9ad81"}, + {file = "simplejson-3.18.4-cp37-cp37m-win_amd64.whl", hash = "sha256:f5e0a03e533313eee9437ccc6c4eab47369f17bc919b57df4a20ccd8bc85d8fd"}, + {file = "simplejson-3.18.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5c4f59dd358c3a99efa46d62dc1583be3a1c37171f5240c4cbdc2d5838870902"}, + {file = "simplejson-3.18.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:041dd69026284d10f035cefb4a75026d2cfcef31f31e62585eeb2b7776e7e047"}, + {file = "simplejson-3.18.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:47509775a5c41ec2a6cd17c9c00fc14965cad8e6670059663872ba5e39332f57"}, + {file = "simplejson-3.18.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1b425a857ce52e651739314e4118fc68bd702ef983148b8fd5cb6f68bb6a020"}, + {file = "simplejson-3.18.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:deb71e6166e4f1264174d78b5b88abd52b14c6649e6eabaf9cf93cb1c7362850"}, + {file = "simplejson-3.18.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:827ddc3b3603f7d0421b054388da6face7871d800c4b3bbedeedc8778e4085ea"}, + {file = "simplejson-3.18.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc74a9ef4d61e18ee6f1886b6ef1fe285b1f432885288afacfb7402f7d469448"}, + {file = "simplejson-3.18.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:16fbebfc38ad4285c256d2430797fd669b0437d090e985c6d443521d4303b133"}, + {file = "simplejson-3.18.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e7d3f7cd57ce0c6a5bb8133f8ed5c3d1be0473a88b7d91a300626298f12d0999"}, + {file = "simplejson-3.18.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:b43d3c2e204d709af955bdb904ae127fe137363ace87fbf7dc8fe6017f7f8449"}, + {file = "simplejson-3.18.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ab5941e1fd509fc151258477ef4b663fe14c94f8faf3581827bf4b02080fd4ba"}, + {file = "simplejson-3.18.4-cp38-cp38-win32.whl", hash = "sha256:a1163bfe5d043c20adeb5c4c8e89dd1dd39b375c8ca6f1c1e35ec537ad7a12e7"}, + {file = "simplejson-3.18.4-cp38-cp38-win_amd64.whl", hash = "sha256:8ccc982197982cdda19e3e5ba4ef7f6ad6bed3eb39bb423bfbf7fa2cd29488ab"}, + {file = "simplejson-3.18.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:01f426ee9e3a2d205aa4c22c3da996b51f2de75c4199ef703258a28b304dea8c"}, + {file = "simplejson-3.18.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:46b8cc86204b51eddcf157cbaf3c44a20f24393030442af0909eeb961186cb67"}, + {file = "simplejson-3.18.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:65de5876e34780b43f92d9d2539de16ecc56d16f56e56e59b34adfa1cebe064f"}, + {file = "simplejson-3.18.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa6fe8fa94a831886ee164ac03514f361e1387a62a1b9da32fde5c0c1f27fa8d"}, + 
{file = "simplejson-3.18.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a50a9da1cf93e35f26c4ddee162abf3184a340339ec2d4001c34607b87e71b4"}, + {file = "simplejson-3.18.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a2285609b4edbf9957440642493788ebef6583042b3fb96217c2e71f29bc6d80"}, + {file = "simplejson-3.18.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b217201efc007166e24e9a282007cc208a2d059350a7c5bd0b0303460ad3019"}, + {file = "simplejson-3.18.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0cc9a47bf8cde85c99db5f4a919bb756e62427ade0f2e875a6ec89ae8492d486"}, + {file = "simplejson-3.18.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e042ae053e05fe193514d51d6b0f0243729961901e9a75f8b596bfaf69522c52"}, + {file = "simplejson-3.18.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:d0d3b9f7cee233368d92c89746dde74313abafaa3ec1f0c06a3f4f164dc27bcc"}, + {file = "simplejson-3.18.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1844d7782652f859d9648531778582d4842d80cfff8d334eb23bb8da0d22a1b0"}, + {file = "simplejson-3.18.4-cp39-cp39-win32.whl", hash = "sha256:2a6e5c0e0817fb20dbb880c83caebbd4ef39f1901f6f8e53b73a3c74de4e5172"}, + {file = "simplejson-3.18.4-cp39-cp39-win_amd64.whl", hash = "sha256:34d95ad8e27754f0d91917600d6ea273e05c82a71021f168c45be48637d9502f"}, + {file = "simplejson-3.18.4-py3-none-any.whl", hash = "sha256:03de1ec4ad734f28ca49b0a758b997d752be0d089ed30360157c4e8811999c8f"}, + {file = "simplejson-3.18.4.tar.gz", hash = "sha256:6197cfebe659ac802a686b5408494115a7062b45cdf37679c4d6a9d4f39649b7"}, +] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "typing-extensions" +version = "4.5.0" +description = "Backported and Experimental Type Hints for Python 3.7+" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "typing_extensions-4.5.0-py3-none-any.whl", hash = "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"}, + {file = "typing_extensions-4.5.0.tar.gz", hash = "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb"}, +] + +[[package]] +name = "typing-inspect" +version = "0.8.0" +description = "Runtime inspection utilities for typing module." 
+category = "main" +optional = false +python-versions = "*" +files = [ + {file = "typing_inspect-0.8.0-py3-none-any.whl", hash = "sha256:5fbf9c1e65d4fa01e701fe12a5bca6c6e08a4ffd5bc60bfac028253a447c5188"}, + {file = "typing_inspect-0.8.0.tar.gz", hash = "sha256:8b1ff0c400943b6145df8119c41c244ca8207f1f10c9c057aeed1560e4806e3d"}, +] + +[package.dependencies] +mypy-extensions = ">=0.3.0" +typing-extensions = ">=3.7.4" + +[[package]] +name = "virtualenv" +version = "20.21.0" +description = "Virtual Python Environment builder" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "virtualenv-20.21.0-py3-none-any.whl", hash = "sha256:31712f8f2a17bd06234fa97fdf19609e789dd4e3e4bf108c3da71d710651adbc"}, + {file = "virtualenv-20.21.0.tar.gz", hash = "sha256:f50e3e60f990a0757c9b68333c9fdaa72d7188caa417f96af9e52407831a3b68"}, +] + +[package.dependencies] +distlib = ">=0.3.6,<1" +filelock = ">=3.4.1,<4" +platformdirs = ">=2.4,<4" + +[package.extras] +docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=22.12)"] +test = ["covdefaults (>=2.2.2)", "coverage (>=7.1)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23)", "pytest (>=7.2.1)", "pytest-env (>=0.8.1)", "pytest-freezegun (>=0.4.2)", "pytest-mock (>=3.10)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)"] + +[[package]] +name = "wrapt" +version = "1.15.0" +description = "Module for decorators, wrappers and monkey patching." +category = "main" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" +files = [ + {file = "wrapt-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e826aadda3cae59295b95343db8f3d965fb31059da7de01ee8d1c40a60398b29"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5fc8e02f5984a55d2c653f5fea93531e9836abbd84342c1d1e17abc4a15084c2"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:96e25c8603a155559231c19c0349245eeb4ac0096fe3c1d0be5c47e075bd4f46"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:40737a081d7497efea35ab9304b829b857f21558acfc7b3272f908d33b0d9d4c"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:f87ec75864c37c4c6cb908d282e1969e79763e0d9becdfe9fe5473b7bb1e5f09"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:1286eb30261894e4c70d124d44b7fd07825340869945c79d05bda53a40caa079"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:493d389a2b63c88ad56cdc35d0fa5752daac56ca755805b1b0c530f785767d5e"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:58d7a75d731e8c63614222bcb21dd992b4ab01a399f1f09dd82af17bbfc2368a"}, + {file = "wrapt-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:21f6d9a0d5b3a207cdf7acf8e58d7d13d463e639f0c7e01d82cdb671e6cb7923"}, + {file = "wrapt-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce42618f67741d4697684e501ef02f29e758a123aa2d669e2d964ff734ee00ee"}, + {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41d07d029dd4157ae27beab04d22b8e261eddfc6ecd64ff7000b10dc8b3a5727"}, + {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:54accd4b8bc202966bafafd16e69da9d5640ff92389d33d28555c5fd4f25ccb7"}, + {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fbfbca668dd15b744418265a9607baa970c347eefd0db6a518aaf0cfbd153c0"}, + {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:76e9c727a874b4856d11a32fb0b389afc61ce8aaf281ada613713ddeadd1cfec"}, + {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e20076a211cd6f9b44a6be58f7eeafa7ab5720eb796975d0c03f05b47d89eb90"}, + {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a74d56552ddbde46c246b5b89199cb3fd182f9c346c784e1a93e4dc3f5ec9975"}, + {file = "wrapt-1.15.0-cp310-cp310-win32.whl", hash = "sha256:26458da5653aa5b3d8dc8b24192f574a58984c749401f98fff994d41d3f08da1"}, + {file = "wrapt-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:75760a47c06b5974aa5e01949bf7e66d2af4d08cb8c1d6516af5e39595397f5e"}, + {file = "wrapt-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ba1711cda2d30634a7e452fc79eabcadaffedf241ff206db2ee93dd2c89a60e7"}, + {file = "wrapt-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56374914b132c702aa9aa9959c550004b8847148f95e1b824772d453ac204a72"}, + {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a89ce3fd220ff144bd9d54da333ec0de0399b52c9ac3d2ce34b569cf1a5748fb"}, + {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bbe623731d03b186b3d6b0d6f51865bf598587c38d6f7b0be2e27414f7f214e"}, + {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3abbe948c3cbde2689370a262a8d04e32ec2dd4f27103669a45c6929bcdbfe7c"}, + {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b67b819628e3b748fd3c2192c15fb951f549d0f47c0449af0764d7647302fda3"}, + {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7eebcdbe3677e58dd4c0e03b4f2cfa346ed4049687d839adad68cc38bb559c92"}, + {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:74934ebd71950e3db69960a7da29204f89624dde411afbfb3b4858c1409b1e98"}, + {file = "wrapt-1.15.0-cp311-cp311-win32.whl", hash = "sha256:bd84395aab8e4d36263cd1b9308cd504f6cf713b7d6d3ce25ea55670baec5416"}, + {file = "wrapt-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:a487f72a25904e2b4bbc0817ce7a8de94363bd7e79890510174da9d901c38705"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:4ff0d20f2e670800d3ed2b220d40984162089a6e2c9646fdb09b85e6f9a8fc29"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9ed6aa0726b9b60911f4aed8ec5b8dd7bf3491476015819f56473ffaef8959bd"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:896689fddba4f23ef7c718279e42f8834041a21342d95e56922e1c10c0cc7afb"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:75669d77bb2c071333417617a235324a1618dba66f82a750362eccbe5b61d248"}, + {file = "wrapt-1.15.0-cp35-cp35m-win32.whl", hash = "sha256:fbec11614dba0424ca72f4e8ba3c420dba07b4a7c206c8c8e4e73f2e98f4c559"}, + {file = "wrapt-1.15.0-cp35-cp35m-win_amd64.whl", hash = "sha256:fd69666217b62fa5d7c6aa88e507493a34dec4fa20c5bd925e4bc12fce586639"}, + {file = "wrapt-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b0724f05c396b0a4c36a3226c31648385deb6a65d8992644c12a4963c70326ba"}, + {file = 
"wrapt-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbeccb1aa40ab88cd29e6c7d8585582c99548f55f9b2581dfc5ba68c59a85752"}, + {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38adf7198f8f154502883242f9fe7333ab05a5b02de7d83aa2d88ea621f13364"}, + {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:578383d740457fa790fdf85e6d346fda1416a40549fe8db08e5e9bd281c6a475"}, + {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:a4cbb9ff5795cd66f0066bdf5947f170f5d63a9274f99bdbca02fd973adcf2a8"}, + {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:af5bd9ccb188f6a5fdda9f1f09d9f4c86cc8a539bd48a0bfdc97723970348418"}, + {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b56d5519e470d3f2fe4aa7585f0632b060d532d0696c5bdfb5e8319e1d0f69a2"}, + {file = "wrapt-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:77d4c1b881076c3ba173484dfa53d3582c1c8ff1f914c6461ab70c8428b796c1"}, + {file = "wrapt-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:077ff0d1f9d9e4ce6476c1a924a3332452c1406e59d90a2cf24aeb29eeac9420"}, + {file = "wrapt-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5c5aa28df055697d7c37d2099a7bc09f559d5053c3349b1ad0c39000e611d317"}, + {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a8564f283394634a7a7054b7983e47dbf39c07712d7b177b37e03f2467a024e"}, + {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780c82a41dc493b62fc5884fb1d3a3b81106642c5c5c78d6a0d4cbe96d62ba7e"}, + {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e169e957c33576f47e21864cf3fc9ff47c223a4ebca8960079b8bd36cb014fd0"}, + {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b02f21c1e2074943312d03d243ac4388319f2456576b2c6023041c4d57cd7019"}, + {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f2e69b3ed24544b0d3dbe2c5c0ba5153ce50dcebb576fdc4696d52aa22db6034"}, + {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d787272ed958a05b2c86311d3a4135d3c2aeea4fc655705f074130aa57d71653"}, + {file = "wrapt-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:02fce1852f755f44f95af51f69d22e45080102e9d00258053b79367d07af39c0"}, + {file = "wrapt-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:abd52a09d03adf9c763d706df707c343293d5d106aea53483e0ec8d9e310ad5e"}, + {file = "wrapt-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdb4f085756c96a3af04e6eca7f08b1345e94b53af8921b25c72f096e704e145"}, + {file = "wrapt-1.15.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:230ae493696a371f1dbffaad3dafbb742a4d27a0afd2b1aecebe52b740167e7f"}, + {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63424c681923b9f3bfbc5e3205aafe790904053d42ddcc08542181a30a7a51bd"}, + {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6bcbfc99f55655c3d93feb7ef3800bd5bbe963a755687cbf1f490a71fb7794b"}, + {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c99f4309f5145b93eca6e35ac1a988f0dc0a7ccf9ccdcd78d3c0adf57224e62f"}, + {file = 
"wrapt-1.15.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b130fe77361d6771ecf5a219d8e0817d61b236b7d8b37cc045172e574ed219e6"}, + {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:96177eb5645b1c6985f5c11d03fc2dbda9ad24ec0f3a46dcce91445747e15094"}, + {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5fe3e099cf07d0fb5a1e23d399e5d4d1ca3e6dfcbe5c8570ccff3e9208274f7"}, + {file = "wrapt-1.15.0-cp38-cp38-win32.whl", hash = "sha256:abd8f36c99512755b8456047b7be10372fca271bf1467a1caa88db991e7c421b"}, + {file = "wrapt-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:b06fa97478a5f478fb05e1980980a7cdf2712015493b44d0c87606c1513ed5b1"}, + {file = "wrapt-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2e51de54d4fb8fb50d6ee8327f9828306a959ae394d3e01a1ba8b2f937747d86"}, + {file = "wrapt-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0970ddb69bba00670e58955f8019bec4a42d1785db3faa043c33d81de2bf843c"}, + {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76407ab327158c510f44ded207e2f76b657303e17cb7a572ffe2f5a8a48aa04d"}, + {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd525e0e52a5ff16653a3fc9e3dd827981917d34996600bbc34c05d048ca35cc"}, + {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d37ac69edc5614b90516807de32d08cb8e7b12260a285ee330955604ed9dd29"}, + {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:078e2a1a86544e644a68422f881c48b84fef6d18f8c7a957ffd3f2e0a74a0d4a"}, + {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2cf56d0e237280baed46f0b5316661da892565ff58309d4d2ed7dba763d984b8"}, + {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7dc0713bf81287a00516ef43137273b23ee414fe41a3c14be10dd95ed98a2df9"}, + {file = "wrapt-1.15.0-cp39-cp39-win32.whl", hash = "sha256:46ed616d5fb42f98630ed70c3529541408166c22cdfd4540b88d5f21006b0eff"}, + {file = "wrapt-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:eef4d64c650f33347c1f9266fa5ae001440b232ad9b98f1f43dfe7a79435c0a6"}, + {file = "wrapt-1.15.0-py3-none-any.whl", hash = "sha256:64b1df0f83706b4ef4cfb4fb0e4c2669100fd7ecacfb59e091fad300d4e04640"}, + {file = "wrapt-1.15.0.tar.gz", hash = "sha256:d06730c6aed78cee4126234cf2d071e01b44b915e725a6cb439a879ec9754a3a"}, +] + +[metadata] +lock-version = "2.0" +python-versions = ">=3.10" +content-hash = "b3f547c57fa3def8eb46004a773ee755fb3e97434b8942ae3a0a71be227154dd" diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..f7a43e8 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,70 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "pyopf" +version = "1.0.0" +description = "Python library for I/O and manipulation of projects under the Open Photogrammetry Format (OPF)" +requires-python = ">=3.10" +dependencies = ["numpy>=1.24.1", "pygltflib>=1.15.3", "shapely>=2.0.1", "argparse>=1.4.0", "python-dateutil>=2.8.2"] +readme = "README.md" +license = { file = "LICENSE" } +classifiers = [ + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Development Status :: 5 - Production/Stable", + "Operating System :: OS Independent", +] +keywords = ["photogrammetry", "OPF"] + +[project.urls] +Homepage = 
"https://pix4d.github.io/opf-spec/specification/project.html" + +[project.optional-dependencies] +tests = [ + "simplejson>=18.3" +] + +[project.scripts] +opf_crop = "opf_tools.crop.cropper:main" +opf_merge = "opf_tools.merge.merger:main" +opf_undistort = "opf_tools.undistort.undistorter:main" + +[tool.hatch.build] +only_packages = true + +[tool.hatch.build.targets.wheel] +packages = ["src/pyopf", "src/opf_tools"] + +[tool.pytest.ini_options] +minversion = 6.0 +addopts = ["--import-mode=importlib"] +testpaths = ["tests"] +norecursedirs = ["spec"] +pythonpath = ["src", "tests"] + +[tool.isort] +multi_line_output = 3 +include_trailing_comma = true + +[tool.poetry] +name = "pyopf" +version = "1.0.0" +description = "Python library for I/O and manipulation of projects under the Open Photogrammetry Format (OPF)" +authors = ["Pix4D"] + +[tool.poetry.dependencies] +python = ">=3.10" +numpy = "*" +pygltflib = "*" +shapely = "*" +python-dateutil = "*" +simplejson = "*" +pillow = "^9.5.0" + +[tool.poetry.dev-dependencies] +pytest = "*" +pre-commit = "*" diff --git a/src/opf_tools/crop/__main__.py b/src/opf_tools/crop/__main__.py new file mode 100644 index 0000000..07b6a7a --- /dev/null +++ b/src/opf_tools/crop/__main__.py @@ -0,0 +1,6 @@ +import sys + +from opf_tools.crop.cropper import main + +if __name__ == "__main__": + sys.exit(main()) diff --git a/src/opf_tools/crop/cropper.py b/src/opf_tools/crop/cropper.py new file mode 100644 index 0000000..4a116bc --- /dev/null +++ b/src/opf_tools/crop/cropper.py @@ -0,0 +1,473 @@ +import argparse +import os +from pathlib import Path +from typing import Optional, cast + +import numpy as np +from shapely.geometry import Point +from shapely.geometry.polygon import Polygon + +from pyopf.ext.pix4d_region_of_interest import Pix4DRegionOfInterest +from pyopf.ext.pix4d_region_of_interest import format as RoiFormat +from pyopf.io import load, save +from pyopf.pointcloud import GlTFPointCloud, Matches, Node +from pyopf.project import Calibration, ProjectObjects +from pyopf.resolve import resolve +from pyopf.util import IntType + + +def _get_xy_polygon(points: list[np.ndarray]) -> Polygon: + """Transform a list of points in a 2D polygon. + The third component of the points is ignored. + + :param points: An array of points + :return: The corresponding shapely polygon + """ + return Polygon([(p[0], p[1]) for p in points]) + + +def _make_polygon(boundary: list[IntType], roi: Pix4DRegionOfInterest) -> Polygon: + """Transforms an OPF plane boundary in a 2D polygon by projecting the points on the XY plane. + + :param boundary: An OPF plane boundary, i.e. a list of indices in the plane.vertices3d array + :param roi: The region of interest where the boundary comes from + + :return: The corresponding shapely polygon + """ + return _get_xy_polygon([roi.plane.vertices3d[p_idx] for p_idx in boundary]) + + +class RoiPolygons: + """Small wrapper class over the Pix4DRegionOfInterest to allow inside queries for a point. + Assumes the normal is perpendicular to the plane and that the plane is parallel to the XY plane. + From this assumption it follows that the only allowed normals are (0,0,1) and (0,0,-1). + """ + + roi: Pix4DRegionOfInterest + outer_boundary: Polygon + inner_boundaries: list[Polygon] + height: Optional[float] + + def __init__(self, roi: Pix4DRegionOfInterest, matrix: Optional[np.ndarray] = None): + """Construct a RoiPolygons wrapper for a region of interest. + :raise ValueError: If the plane is not parallel to the XY plane. 
+ :raise ValueError: If the plane normal is not (0,0,1) or (0,0,-1). + """ + + if len(set([p[2] for p in roi.plane.vertices3d])) != 1: + raise ValueError("The plane is not parallel to the XY plane") + + if not np.array_equal( + roi.plane.normal_vector, [0, 0, 1] + ) and not np.array_equal(roi.plane.normal_vector, [0, 0, -1]): + raise ValueError( + "The only supported plane normals are (0,0,1) and (0,0,-1)" + ) + + self.outer_boundary = _make_polygon(roi.plane.outer_boundary, roi) + + self.inner_boundaries = [] + if roi.plane.inner_boundaries is not None: + self.inner_boundaries = [ + _make_polygon(boundary, roi) for boundary in roi.plane.inner_boundaries + ] + + self.height = roi.height + self.matrix = matrix + + self.roi = roi + + def _is_inside_boundaries(self, point: np.ndarray) -> bool: + """Check if a point is inside the outer boundary and outside all inner boundaries. + The point must be in the same system of coordinates as the boundaries. + :param point: A 3D point + """ + + xy_point = Point(point[0], point[1]) + + return self.outer_boundary.contains(xy_point) and not any( + boundary.contains(xy_point) for boundary in self.inner_boundaries + ) + + def _is_inside_elevation_bounds(self, point: np.ndarray) -> bool: + """Check if a point is within the elevation bounds of the region of interest. + The point must be in the same system of coordinates as the boundaries. + """ + if self.height is None: + return True + + elevation_difference = point[2] - self.roi.plane.vertices3d[0][2] + + elevation_along_normal = elevation_difference * self.roi.plane.normal_vector[2] + + return elevation_along_normal > 0 and elevation_along_normal < self.height + + def is_inside(self, point: np.ndarray) -> bool: + """Check if a point is inside the ROI. + :param point: The point to check. + :return: True if the point is inside the region of interest, False otherwise. + """ + homogeneous_point = np.append(point, 1) + if self.matrix is not None: + opf_matrix_inverse = np.array( + [[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]] + ) + homogeneous_point = opf_matrix_inverse @ self.matrix @ homogeneous_point + + p = homogeneous_point[:3] + + return self._is_inside_boundaries(p) and self._is_inside_elevation_bounds(p) + + +def _filter_array( + array: np.ndarray, flags: np.ndarray, output_file: Path +) -> np.ndarray: + """Filter the array with a binary flag array, writing the output at the indicated path. + The output file will be overwritten if it is already present. + + :param array: The array to filter. It is assumed to be two dimensional. + :param flags: A one dimensional boolean array used to filter the array (by rows). + :param output_file: The file to save the filtered array. + + :return: A memory mapped array representing the filtered data. + + :raise ValueError: If the length of the flags is not the same as the first dimension of the array.
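+ + Illustrative example (the variable names are placeholders), keeping the rows of an (N, 3) position array selected by a boolean mask of length N:: + + kept = _filter_array(positions, mask, Path("positions.bin"))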
+ """ + + if len(flags) != len(array): + raise ValueError("The size of the array and the filter do not match") + + kept_entries = np.sum(flags) + + filtered = np.memmap( + output_file, + mode="w+", + dtype=array.dtype, + offset=0, + shape=(kept_entries, array.shape[1]), + ) + + filtered[:] = array[flags] + + return filtered + + +def _copy_array(array: np.memmap | np.ndarray, output_path: Path) -> np.memmap: + """Copies the array to a new location and returns the newly opened array""" + + new_array = np.memmap(output_path, mode="w+", dtype=array.dtype, shape=array.shape) + new_array[:] = array[:] + + return new_array + + +def _filter_matches(matches: Matches, flags: np.ndarray, output_gltf_dir: Path): + matches.point_index_ranges.ranges = _filter_array( + matches.point_index_ranges.ranges, + flags, + output_gltf_dir / "matchPointIndexRanges.bin", + ) + + matches.camera_ids = _copy_array( + matches.camera_ids, output_gltf_dir / "matchCameraIds.bin" + ) + + if matches.image_points is not None: + matches.image_points.featureIds = _copy_array( + matches.image_points.featureIds, output_gltf_dir / "matchFeatureIds.bin" + ) + + matches.image_points.scales = _copy_array( + matches.image_points.scales, output_gltf_dir / "matchScales.bin" + ) + + matches.image_points.pixelCoordinates = _copy_array( + matches.image_points.pixelCoordinates, + output_gltf_dir / "matchPixelCoordinates.bin", + ) + + if matches.image_points.depths is not None: + matches.image_points.depths = _copy_array( + matches.image_points.depths, output_gltf_dir / "matchDepths.bin" + ) + + +def _filter_node( + node: Node, roi: Pix4DRegionOfInterest, output_gltf_dir: Path +) -> Optional[Node]: + """Filters in place a node and returns a reference to it. + If the node is outside the ROI, None is returned and the initial value of the node is left unchanged. + """ + + roi_polygons = RoiPolygons(roi, node.matrix) + + # A list of flags, indicating whether each point should be kept or not + flags = np.apply_along_axis(roi_polygons.is_inside, 1, node.position) + + if not np.any(flags): + return None + + node.position = _filter_array( + node.position, flags, output_gltf_dir / "positions.bin" + ) + + if node.normal is not None: + node.normal = _filter_array(node.normal, flags, output_gltf_dir / "normals.bin") + if node.color is not None: + node.color = _filter_array(node.color, flags, output_gltf_dir / "colors.bin") + if node.custom_attributes is not None: + for name, attribute in node.custom_attributes.items(): + attribute = _filter_array( + attribute, flags, output_gltf_dir / (name + ".bin") + ) + + if node.matches: + _filter_matches(node.matches, flags, output_gltf_dir) + + return node + + +def filter_pointcloud( + pointcloud: GlTFPointCloud, roi: Pix4DRegionOfInterest, output_gltf_dir: Path +) -> None: + """Filters in-place a point cloud with a region of interest, keeping only the points that are inside the ROI. + If a node is outside the region of interest, it will be removed. + If all nodes are outside the ROI, the returned point cloud will not have any nodes. + """ + + maybe_nodes = [ + _filter_node(node, roi, output_gltf_dir) for node in pointcloud.nodes + ] + pointcloud.nodes = [node for node in maybe_nodes if node is not None] + + +def _get_region_of_interest(project: ProjectObjects) -> Pix4DRegionOfInterest: + """Gets the region of interest extension from an OPF project. + + :param project: The project to query. + :return: The region of interest. + + :raise ValueError: If the project contains zero or more than one regions of interest. 
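+ + Illustrative usage, mirroring the flow in ``main`` (the file name is a placeholder):: + + roi = _get_region_of_interest(resolve(load("project.opf"), supported_extensions=["ext_pix4d_region_of_interest"]))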
+ """ + + regions_of_interest = project.get_extensions_by_format(RoiFormat) + + if len(regions_of_interest) == 0: + raise ValueError( + "The project has no extensions of type Pix4DRegionOfInterest. Cropping is not supported" + ) + elif len(regions_of_interest) > 1: + raise ValueError( + "The project has multiple extensions of type Pix4DRegionOfInterest. Cropping is not supported" + ) + + return cast(Pix4DRegionOfInterest, regions_of_interest[0]) + + +def _get_pointcloud_objects( + project: ProjectObjects, +) -> list[tuple[str, GlTFPointCloud]]: + def get_point_cloud(obj): + if isinstance(obj, GlTFPointCloud): + return obj + elif isinstance(obj, Calibration): + return obj.tracks + else: + return None + + pointclouds: list[tuple[str, GlTFPointCloud]] = [] + + for key, objects in project.__dict__.items(): + if key.startswith("_"): + continue + for object in objects: + point_cloud = get_point_cloud(object) + if point_cloud is None: + continue + pointclouds.append((str(object.metadata.id), point_cloud)) + + return pointclouds + + +def _used_camera_uids_in_pointcloud(pointcloud: GlTFPointCloud) -> set[str]: + + used_camera_uids = set() + + for node in pointcloud.nodes: + matches = node.matches + if matches: + for o, c in matches.point_index_ranges: + ids = matches.camera_ids[o : o + c].flatten().tolist() + uids = [matches.camera_uids[id] for id in ids] + used_camera_uids.update(uids) + + return used_camera_uids + + +def _used_camera_uids_in_project(project: ProjectObjects) -> set[str]: + + pointclouds = _get_pointcloud_objects(project) + + used_camera_uids = set() + for _, pointcloud in pointclouds: + used_camera_uids.update(_used_camera_uids_in_pointcloud(pointcloud)) + + return used_camera_uids + + +def _filter_cameras_without_points(project: ProjectObjects) -> None: + """Remove all cameras from project which do not see any point in point_cloud""" + + used_camera_uids = _used_camera_uids_in_project(project) + + if project.input_cameras is None or project.projected_input_cameras is None: + raise RuntimeError("Project does not contain input cameras") + if project.calibration is None or project.calibration.calibrated_cameras is None: + raise RuntimeError("Project is not calibrated") + + for capture in project.input_cameras.captures: + capture.cameras = [ + camera for camera in capture.cameras if camera.id in used_camera_uids + ] + project.input_cameras.captures = [ + capture + for capture in project.input_cameras.captures + if len(capture.cameras) > 0 + ] + + used_capture_ids = [capture.id for capture in project.input_cameras.captures] + project.projected_input_cameras.captures = [ + capture + for capture in project.projected_input_cameras.captures + if capture.id in used_capture_ids + ] + + project.calibration.calibrated_cameras.cameras = [ + camera + for camera in project.calibration.calibrated_cameras.cameras + if camera.id in used_camera_uids + ] + + +def _filter_control_points(project: ProjectObjects, roi: Pix4DRegionOfInterest) -> None: + """Filter the control points (MTPs, GCPs) of a project with a region of interest + The filtering is based on the positions of the calibrated control points with respect to the ROI. + The corresponding input and projected control points are filtered based on their IDs. 
+ """ + + if ( + project.calibration is None + or project.calibration.calibrated_control_points is None + ): + return + + roi_polygons = RoiPolygons(roi) + + cps = project.calibration.calibrated_control_points + cps.points = list( + filter(lambda p: roi_polygons.is_inside(p.coordinates), cps.points) + ) + + if project.input_control_points is None or project.projected_control_points is None: + raise RuntimeError("Input and projected control points must be present") + + remaining_ids = [cp.id for cp in cps.points] + project.input_control_points.gcps = [ + gcp for gcp in project.input_control_points.gcps if gcp.id in remaining_ids + ] + project.input_control_points.mtps = [ + mtp for mtp in project.input_control_points.mtps if mtp.id in remaining_ids + ] + project.projected_control_points.projected_gcps = [ + projected_gcp + for projected_gcp in project.projected_control_points.projected_gcps + if projected_gcp.id in remaining_ids + ] + + +def crop( + project: ProjectObjects, output_path: Path, roi: Pix4DRegionOfInterest +) -> None: + """Crop a collection of project objects using the Pix4D region of interest extension. + The project must contain exactly one instance of the region of interest extension + and must be calibrated. + + :param project: A resolved project. + :param output_path: The directory where the cropped project is saved. + :param roi: The region of interest used for cropping. + + :raise ValueError: If the project does not contain a unique instance of the region of interest extension + """ + + output_path = output_path.absolute() + + pointclouds = _get_pointcloud_objects(project) + + for pointcloud_id, pointcloud in pointclouds: + output_dir = output_path / pointcloud_id + os.mkdir(output_dir) + filter_pointcloud(pointcloud, roi, output_dir) + + _filter_cameras_without_points(project) + + _filter_control_points(project, roi) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Simple OPF project cropping tool") + parser.add_argument( + "input", + type=str, + help="An OPF project file", + ) + parser.add_argument( + "outdir", type=str, help="Output directory for the cropped project" + ) + parser.add_argument( + "--roi", + type=str, + default=None, + help="Optional path to a Region of Interest json file", + ) + parser.add_argument( + "--force", + "-f", + dest="force", + action="store_true", + default=False, + help="Do not ask for confirmation when overwriting output files", + ) + + return parser.parse_args() + + +def main(): + args = parse_args() + + output_dir = Path(args.outdir) + if os.path.exists(output_dir): + if len(os.listdir(output_dir)) != 0 and not args.force: + while True: + try: + answer = input( + "The output directory is not empty, do you want to proceed [yN]? 
" + ) + except KeyboardInterrupt: + print() + exit(-1) + except EOFError: + answer = "" + if answer == "n" or answer == "N" or answer == "": + exit(0) + if answer == "y" or answer == "Y": + break + else: + os.makedirs(output_dir) + + project = load(args.input) + project = resolve(project, supported_extensions=["ext_pix4d_region_of_interest"]) + + roi = load(args.roi) if args.roi else _get_region_of_interest(project) + + crop(project, Path(output_dir), roi=roi) + + save(project, str(output_dir / "project.opf")) diff --git a/src/opf_tools/merge/__main__.py b/src/opf_tools/merge/__main__.py new file mode 100644 index 0000000..21ac314 --- /dev/null +++ b/src/opf_tools/merge/__main__.py @@ -0,0 +1,6 @@ +import sys + +from opf_tools.merge.merger import main + +if __name__ == "__main__": + sys.exit(main()) diff --git a/src/opf_tools/merge/merger.py b/src/opf_tools/merge/merger.py new file mode 100644 index 0000000..7f8ce86 --- /dev/null +++ b/src/opf_tools/merge/merger.py @@ -0,0 +1,876 @@ +import argparse +import copy +import os +import warnings +from collections import defaultdict +from pathlib import Path +from typing import Any, Optional, TypeVar, overload +from uuid import UUID + +import numpy as np + +import pyopf.pointcloud.merge as pcl_merge +from pyopf.cameras import ( + CalibratedCameras, + CameraList, + Capture, + GpsBias, + InputCameras, + ProjectedCapture, + ProjectedInputCameras, + ProjectedSensor, + Sensor, +) +from pyopf.cps import ( + CalibratedControlPoint, + CalibratedControlPoints, + Constraints, + InputControlPoints, + Mark, + OrientationConstraint, + ProjectedControlPoints, + ScaleConstraint, +) +from pyopf.crs import Crs, Geolocation, SceneReferenceFrame +from pyopf.io import load, save +from pyopf.pointcloud import GlTFPointCloud +from pyopf.pointcloud.pcl import ( + opf_axis_rotation_matrix, + opf_axis_rotation_matrix_inverse, +) +from pyopf.project import ( + Calibration, + Metadata, + Project, + ProjectObjects, + ProjectSource, +) +from pyopf.resolve import resolve +from pyopf.types import OpfObject +from pyopf.uid64 import Uid64, uid64 + +Object = TypeVar("Object", bound=OpfObject) + + +def _clear_unsupported_attributes(object: Object) -> Object: + object.unknown_properties = None + object.extensions = None + return object + + +def _are_crss_equal(lhs: Crs, rhs: Crs) -> bool: + # This implementation is very naïve at the moment + return lhs.definition == rhs.definition and lhs.geoid_height == rhs.geoid_height + + +def _are_geolocations_equal( + lhs: Optional[Geolocation], rhs: Optional[Geolocation] +) -> bool: + + return (lhs is None) == (rhs is None) and ( + lhs is None + or ( + bool( + rhs is not None + and _are_crss_equal(lhs.crs, rhs.crs) + and np.equal(lhs.coordinates, rhs.coordinates).all() + and np.equal(lhs.sigmas, rhs.sigmas).all() + ) + ) + ) + + +def _find_or_remap_id(id: Uid64, mapping: dict[Uid64, Uid64]) -> Uid64: + try: + return mapping[id] + except KeyError: + # We don't do anything to avoid colissions here based on the fact that the + # probability of having a collision with 1,000,000 elements is 2.7e-8 which + # is just a bit higher than the probability of winning EuroMillions. We have + # to play this game ~ 4,000,000 times to have a 10% chance of having one or + # more collisions. 
+ new_id = uid64() + mapping[id] = new_id + return new_id + + +def _merge_names(objects: list[Any]): + + names = [o.metadata.name for o in objects if o.metadata.name is not None] + + unnamed = len(objects) - len(names) + if unnamed == len(objects): + return None # don't give any name to a union of unnamed items + if unnamed == 1: + names.append("1 unnamed item") + elif unnamed != 0: + names.append("%d unnamed items" % unnamed) + + return " + ".join(names) + + +def _make_temporary_sources(objects: list[Any]): + """This function is meant to return the union of all original sources. + The purpose is to use it after all objects are merged to replace all sources + of the same type by the object in which they have been merged""" + sources = [] + for object in objects: + if isinstance(object.metadata.sources, list): + sources += object.metadata.sources + else: + for source in object.metadata.sources.__dict__.values(): + sources.append( + ProjectSource(id=source.metadata.id, type=source.metadata.type) + ) + return sources + + +def _merge_subojects_by_key( + key, containers, shifts=None, sensor_id_mappings=None, **kwargs +): + attributes = [ + getattr(container, key) for container in containers if hasattr(container, key) + ] + for o in attributes: + if isinstance(o, list) and len(o) > 1: + raise ValueError( + "Impossible to merge objects with more than one instance per subobject type" + ) + # Flattening the attributes which are lists to their first element only + objects = [] + for attribute in attributes: + if isinstance(attribute, list): + if len(attribute) == 1: + objects.append(attribute[0]) + elif attribute is not None: + objects.append(attribute) + + if len(objects) == 0: + return None + + kwargs = copy.copy(kwargs) + + # Filtering of arguments that are lists with as many elements as projects to pass the + # right elements depending on the presence of the current object key in each project. + def filter_list(elements): + def accept(container, key): + if not hasattr(container, key): + return False + attribute = getattr(container, key) + if isinstance(attribute, list): + return len(attribute) != 0 + else: + return attribute is not None + + return [ + element + for container, element in zip(containers, elements) + if accept(container, key) + ] + + if shifts: + kwargs["shifts"] = filter_list(shifts) + if sensor_id_mappings: + kwargs["sensor_id_mappings"] = filter_list(sensor_id_mappings) + + return merge(*objects, **kwargs) + + +def _fix_sources(project: ProjectObjects, uuid_mapping: dict[UUID, UUID]) -> None: + for key, items in project.__dict__.items(): + + if key.startswith("_"): + continue + + for item in items: + sources = { + uuid_mapping[source.id]: source.type for source in item.metadata.sources + } + item.metadata.sources = [ + ProjectSource(id=id, type=type) for id, type in sources.items() + ] + + +def _merge_labels(objs: list[Any]): + labels = list( + { + label + for o in objs + if o.metadata.labels is not None + for label in o.metadata.labels + } + ) + if len(labels) == 0: + return None + return labels + + +def _merge_scene_reference_frames( + srss: list[SceneReferenceFrame], **kwargs +) -> SceneReferenceFrame: + # Nothing smart here, just return the first one after verifying they are + # all compatible.
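+ # "Compatible" here means: identical CRS definition and identical base_to_canonical swap_xy and scale, as checked below.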
+ srs = copy.deepcopy(srss[0]) + for other in srss[1:]: + if ( + other.crs.definition != srs.crs.definition + or other.base_to_canonical.swap_xy != srs.base_to_canonical.swap_xy + or (other.base_to_canonical.scale != srs.base_to_canonical.scale).any() + ): + raise RuntimeError("Incompatible spatial reference frames for merging") + _clear_unsupported_attributes(srs) + + if srs.metadata is None: + raise RuntimeError("SceneReferenceFrame metadata is None") + + srs.metadata.sources = [] + return srs + + +def _merge_metadata(objects: list[Any]): + # This function assumes that all objects are of the same type + return Metadata( + type=objects[0].metadata.type, + name=_merge_names(objects), + labels=_merge_labels(objects), + sources=_make_temporary_sources(objects), + ) + + +def _merge_camera_lists(camera_objs: list[CameraList], **kwargs) -> CameraList: + + id_to_uri = {} + + # The consistency of the camera UIDs is checked while creating the final list of cameras + cameras = [] + for camera_list in camera_objs: + for camera in camera_list.cameras: + try: + if id_to_uri[camera.id] != camera.uri: + raise RuntimeError( + "Fatal error: Camera UID inconsistency found for UID %s" + % camera.id + ) + except KeyError: + id_to_uri[camera.id] = camera.uri + cameras.append(_clear_unsupported_attributes(copy.copy(camera))) + + result = CameraList(cameras) + result.metadata = _merge_metadata(camera_objs) + return result + + +def _merge_marks(list1: list[Mark], list2: list[Mark]) -> list[Mark]: + + marks = {m.camera_id: m for m in list1} + for mark in list2: + try: + previous = marks[mark.camera_id] + if ( + previous.position_px != mark.position_px + ).any() or previous.accuracy != mark.accuracy: + raise RuntimeError("Inconsistent marks") + except KeyError: + marks[mark.camera_id] = mark + + return list(marks.values()) + + +def _merge_input_control_points( + cps_objs: list[InputControlPoints], **kwargs +) -> InputControlPoints: + + gcps = {} + mtps = {} + + def check_gcp_compatible(lhs, rhs) -> bool: + return lhs.is_checkpoint == rhs.is_checkpoint and _are_geolocations_equal( + lhs.geolocation, rhs.geolocation + ) + + def check_mtp_compatible(lhs, rhs) -> bool: + return lhs.is_checkpoint == rhs.is_checkpoint + + def process_tie_point(tp, output, other_type_output, comparison_fun): + + if tp.id in other_type_output: + raise RuntimeError("Tie point found as both MTP and GCP") + try: + existing = output[tp.id] + if not comparison_fun(tp, existing): + raise RuntimeError( + "Two incompatible instances of the same tie point found" + ) + existing.marks = _merge_marks(existing.marks, tp.marks) + except KeyError: + output[tp.id] = _clear_unsupported_attributes(copy.deepcopy(tp)) + except RuntimeError as e: + raise RuntimeError( + "Fatal error: Merging tie point %s: %s" % (tp.id, e.args[0]) + ) from e + + for cps in cps_objs: + for gcp in cps.gcps: + process_tie_point(gcp, gcps, mtps, check_gcp_compatible) + for mtp in cps.mtps: + process_tie_point(mtp, mtps, gcps, check_mtp_compatible) + + result = InputControlPoints(gcps=list(gcps.values()), mtps=list(mtps.values())) + result.metadata = _merge_metadata(cps_objs) + return result + + +def _merge_projected_control_points(cps_objs: list[ProjectedControlPoints], **kwargs): + + gcps = {} + + shifts = kwargs.get("shifts", [np.zeros(3) for i in range(len(cps_objs))]) + + for cps, shift in zip(cps_objs, shifts): + for gcp in cps.projected_gcps: + + gcp = _clear_unsupported_attributes(copy.deepcopy(gcp)) + gcp.coordinates += shift + + try: + existing = gcps[gcp.id] + if not 
np.allclose( + gcp.coordinates, existing.coordinates + ) or not np.allclose(gcp.sigmas, existing.sigmas): + raise RuntimeError( + "Fatal error: Repeated ID merging incompatible projected GCP: %s" + % gcp.id + ) + except KeyError: + gcps[gcp.id] = gcp + + result = ProjectedControlPoints(projected_gcps=list(gcps.values())) + result.metadata = _merge_metadata(cps_objs) + return result + + +def _merge_orientation_constraints( + constraint_objs: list[Constraints], +) -> list[OrientationConstraint]: + + orientations = {} + for constraints in constraint_objs: + for orientation in constraints.orientation_constraints: + try: + existing = orientations[orientation.id] + if ( + existing.id_from != orientation.id_from + or existing.id_to != orientation.id_to + or existing.sigma_deg != orientation.sigma_deg + or (existing.unit_vector != orientation.unit_vector).any() + ): + raise RuntimeError( + "FatalError: Repeated ID merging incompatible orientation contraint: %s" + % orientation.id + ) + except KeyError: + orientations[orientation.id] = orientation + + return list(orientations.values()) + + +def _merge_scale_constraints( + constraint_objs: list[Constraints], +) -> list[ScaleConstraint]: + + scales = {} + for constraints in constraint_objs: + for scale in constraints.scale_constraints: + try: + existing = scales[scale.id] + if ( + existing.id_from != scale.id_from + or existing.id_to != scale.id_to + or existing.sigma != scale.sigma + or existing.distance != scale.distance + ): + raise RuntimeError( + "FatalError: Repeated ID merging incompatible scale contraint: %s" + % scale.id + ) + except KeyError: + scales[scale.id] = scale + + return list(scales.values()) + + +def _merge_constraints(constraint_objs: list[Constraints], **kwargs) -> Constraints: + + result = Constraints( + orientation_constraints=_merge_orientation_constraints(constraint_objs), + scale_constraints=_merge_scale_constraints(constraint_objs), + ) + result.metadata = _merge_metadata(constraint_objs) + return result + + +@overload +def _merge_sensors( + camera_objs: list[InputCameras], + sensor_id_mappings: list[dict[Uid64, Uid64]], + input_type_name="input cameras", +) -> list[Sensor]: + ... + + +@overload +def _merge_sensors( + camera_objs: list[ProjectedInputCameras], + sensor_id_mappings: list[dict[Uid64, Uid64]], + input_type_name="input cameras", +) -> list[ProjectedSensor]: + ... + + +def _merge_sensors( + camera_objs: list[InputCameras] | list[ProjectedInputCameras], + sensor_id_mappings: list[dict[Uid64, Uid64]], + input_type_name="input cameras", +) -> list[Sensor] | list[ProjectedSensor]: + + """Merge all sensors in a single list. + Sensors in each InputCameras object get reassigned ID based on the input mapping or + new random IDs are produced and stored in the mapping if the sensor is not found.""" + + sensors = [] + for input_cameras, mapping in zip(camera_objs, sensor_id_mappings): + used_sensor_ids = set() + for sensor in input_cameras.sensors: + if sensor.id in used_sensor_ids: + raise RuntimeError( + "Fatal error: Repeated sensor ID found in the same %s: %s" + % (input_type_name, sensor.id) + ) + used_sensor_ids.add(sensor.id) + + sensor = _clear_unsupported_attributes(copy.deepcopy(sensor)) + sensor.id = _find_or_remap_id(sensor.id, mapping) + sensors.append(sensor) + + return sensors + + +def _merge_captures( + camera_objs: list[InputCameras], sensor_id_mappings: list[dict[Uid64, Uid64]] +) -> list[Capture]: + """Merge all captures in a single list, applying a sensor_id remapping. 
+ It's not allowed to have two captures with the same ID in two different input camera lists + or the same camera in to different captures. + """ + captures = [] + used_capture_ids = set() + used_camera_ids = set() + + for input_cameras, mapping in zip(camera_objs, sensor_id_mappings): + for capture in input_cameras.captures: + if capture.id in used_capture_ids: + raise RuntimeError( + "Fatal error: Repeated capture found in input cameras: %s" + % capture.id + ) + used_capture_ids.add(capture.id) + capture = _clear_unsupported_attributes(copy.deepcopy(capture)) + for camera in capture.cameras: + if camera.id in used_camera_ids: + raise RuntimeError( + "Fatal error: Repeated camera found in input cameras: %s" + % camera.id + ) + used_camera_ids.add(camera.id) + camera.sensor_id = _find_or_remap_id(camera.sensor_id, mapping) + + captures.append(capture) + + return captures + + +def _merge_input_cameras(camera_objs: list[InputCameras], **kwargs) -> InputCameras: + """Merges all input cameras into a single list. + Sensor IDs are remapped to ensure that each sub project uses different IDs. Camera UIDs are not + remapped as the input lists are assumed to be using non-overlapping sets of UIDs. + :param camera_objs: a list of :class:`InputCameras` + :param kwargs: may contain an input/output `sensor_id_mappings`. This parameter is a list + of dicts to be used for reusing and/or completing the sensor id mapping. Each element in the list + is the mapping to be used for each InputCameras in the same order. + :return: a :class:`InputCameras` instance + """ + mappings = kwargs.get("sensor_id_mappings", [{} for i in range(len(camera_objs))]) + sensors = _merge_sensors(camera_objs, mappings) + captures = _merge_captures(camera_objs, mappings) + + result = InputCameras(captures=captures, sensors=sensors) + result.metadata = _merge_metadata(camera_objs) + + return result + + +def _merge_projected_sensors( + camera_objs: list[ProjectedInputCameras], + sensor_id_mappings: list[dict[Uid64, Uid64]], +) -> list[ProjectedSensor]: + return _merge_sensors(camera_objs, sensor_id_mappings, "projected input cameras") + + +def _merge_projected_captures( + camera_objs: list[ProjectedInputCameras], shifts: list[np.ndarray] +) -> list[ProjectedCapture]: + """Merge all projected captures in a single list, applying a shift to projected coordinates. + It's not allowed to have two captures with the same ID in two different projected camera lists. + """ + captures = [] + used_capture_ids = set() + + for projected_cameras, shift in zip(camera_objs, shifts): + for capture in projected_cameras.captures: + if capture.id in used_capture_ids: + raise RuntimeError( + "Fatal error: Repeated capture found in projected input cameras: %s" + % capture.id + ) + used_capture_ids.add(capture.id) + capture = _clear_unsupported_attributes(copy.deepcopy(capture)) + if capture.geolocation is not None: + capture.geolocation.position += shift + + captures.append(capture) + + return captures + + +def _merge_projected_input_cameras( + camera_objs: list[ProjectedInputCameras], **kwargs +) -> ProjectedInputCameras: + """Merges all projected input cameras into a single list. + Sensor IDs are remapped to ensure that each sub project uses different IDs. Camera UIDs are not + remapped as the input lists are assumed to be using non-overlapping sets of UIDs. 
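# A minimal sketch of the sensor-ID remapping idea used by the merge functions in this
# file, assuming a simplified stand-in for the _find_or_remap_id helper defined earlier
# and plain ints instead of Uid64 values (illustrative only, not the library API).
# The same per-project mapping dict is reused across input, projected and calibrated
# cameras so that one sensor keeps a single new ID everywhere it is referenced.
import random

def find_or_remap_id(old_id: int, mapping: dict[int, int]) -> int:
    """Return the already-assigned new ID, or create and record a fresh one."""
    if old_id in mapping:
        return mapping[old_id]
    new_id = random.getrandbits(64)  # stand-in for uid64()
    mapping[old_id] = new_id
    return new_id

mapping_project_a: dict[int, int] = {}
# Asking twice for the same original ID yields the same remapped ID.
assert find_or_remap_id(7, mapping_project_a) == find_or_remap_id(7, mapping_project_a)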
+ :param camera_objs: a list of :class:`ProjectedInputCameras` + :param kwargs: may contain: + * a 'shifts' parameter with the shifts to apply to projected coordinates in each item + * an input/output `sensor_id_mappings`. This parameter is a list + of dicts to be used for reusing and/or completing the sensor id mapping. Each element in + the list is the mapping to be used for each InputCameras in the same order. + :return: a :class:`ProjectedInputCameras` instance + """ + mappings = kwargs.get("sensor_id_mappings", [{} for i in range(len(camera_objs))]) + sensors = _merge_projected_sensors(camera_objs, mappings) + + shifts = kwargs.get("shifts", [np.zeros(3) for i in range(len(camera_objs))]) + captures = _merge_projected_captures(camera_objs, shifts) + + result = ProjectedInputCameras(captures=captures, sensors=sensors) + result.metadata = _merge_metadata(camera_objs) + + return result + + +def _merge_calibrated_cameras( + camera_objs: list[CalibratedCameras], **kwargs +) -> CalibratedCameras: + + mappings = kwargs.get("sensor_id_mappings", [{} for i in range(len(camera_objs))]) + shifts = kwargs.get("shifts", [np.zeros(3) for i in range(len(camera_objs))]) + + sensors = [] + cameras = [] + used_camera_ids = set() + + for calibrated_cameras, shift, mapping in zip(camera_objs, shifts, mappings): + for sensor in calibrated_cameras.sensors: + sensor = _clear_unsupported_attributes(copy.deepcopy(sensor)) + sensor.id = _find_or_remap_id(sensor.id, mapping) + sensors.append(sensor) + + for camera in calibrated_cameras.cameras: + camera = _clear_unsupported_attributes(copy.deepcopy(camera)) + if camera.id in used_camera_ids: + raise RuntimeError( + "Fatal error: Repeated camera found in calibrated cameras: %s" + % camera.id + ) + used_camera_ids.add(camera.id) + camera.sensor_id = _find_or_remap_id(camera.sensor_id, mapping) + camera.position += shift + cameras.append(camera) + + # No metadata needs to be merged or considered because this is not a top level project item + return CalibratedCameras(cameras=cameras, sensors=sensors) + + +def _merge_calibrated_control_points( + cp_objs: list[CalibratedControlPoints], **kwargs +) -> CalibratedControlPoints: + + shifts = kwargs.get("shifts", [np.zeros(3) for i in range(len(cp_objs))]) + + points = defaultdict(list) + for calibrated_cps, shift in zip(cp_objs, shifts): + for point in calibrated_cps.points: + points[point.id].append(point.coordinates + shift) + + # No metadata needs to be merged or considered because this is not a top level project item + return CalibratedControlPoints( + points=[ + CalibratedControlPoint(id=id, coordinates=np.mean(coords, 0)) + for id, coords in points.items() + ] + ) + + +def _merge_gps_bias(gps_bias_objs: list[GpsBias], **kwargs): + warnings.warn( + "GPS bias resources cannot be merged, the output will not contain any" + ) + + +def _merge_calibrations(calibrations: list[Calibration], **kwargs) -> Calibration: + + result = Calibration() + result.metadata = _merge_metadata(calibrations) + + kwargs = copy.copy(kwargs) + shifts = kwargs.pop("shifts", [np.zeros(3) for i in range(len(calibrations))]) + sensor_id_mappings = kwargs.pop( + "sensor_id_mappings", [{} for i in range(len(calibrations))] + ) + + kwargs["base_dir"] = kwargs.get("base_dir", Path(".")) / str(result.metadata.id) + + keys = { + key + for calibration in calibrations + for key in calibration.__dict__ + if not key.startswith("_") + } + for key in keys: + merged_item = _merge_subojects_by_key( + key, calibrations, shifts, sensor_id_mappings, **kwargs + ) + 
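# A tiny numeric sketch (hypothetical values) of how calibrated control points with the
# same ID are combined in _merge_calibrated_control_points above: the shift-corrected
# coordinates are simply averaged per point ID.
import numpy as np
from collections import defaultdict

points = defaultdict(list)
points["cp_1"].append(np.array([1.0, 2.0, 3.0]))   # from sub-project A (already shifted)
points["cp_1"].append(np.array([1.0, 2.0, 5.0]))   # from sub-project B (already shifted)

merged = {pid: np.mean(coords, 0) for pid, coords in points.items()}
print(merged["cp_1"])                               # [1. 2. 4.]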
if merged_item: + setattr(result, key, [merged_item]) + + return result + + +def _verify_calibrated_control_point_consistency( + projects: list[ProjectObjects], **kwargs +): + """Verifies that the standard deviation of the distribution of calibrated control points positions + is consistent with the measurement error given in the input control points. + Raises an error in case of inconsistency.""" + pass + + +def _merge_point_clouds(point_clouds: list[GlTFPointCloud], **kwargs) -> GlTFPointCloud: + + shifts = kwargs.get("shifts", [np.zeros(3) for i in range(len(point_clouds))]) + base_dir = kwargs.get("base_dir", Path(".")) + + if hasattr(point_clouds[0], "metadata"): + metadata = _merge_metadata(point_clouds) + output_dir = base_dir / str(metadata.id) + else: + metadata = None + output_dir = base_dir + + result = pcl_merge.concatenate(point_clouds) + + shifts = [ + shift + for shift, point_cloud in zip(shifts, point_clouds) + for i in range(len(point_cloud.nodes)) + ] + + for node, shift in zip(result.nodes, shifts): + if not (shift == np.zeros(3)).all(): + if node.matrix is not None: + node.matrix = opf_axis_rotation_matrix_inverse @ node.matrix + node.matrix[0, 3] += shift[0] + node.matrix[1, 3] += shift[1] + node.matrix[2, 3] += shift[2] + node.matrix = opf_axis_rotation_matrix @ node.matrix + else: + # Since there was no matrix, we can't assume much about + # the origin of the data. + node.matrix = np.array( + [ + [1, 0, 0, shift[0]], + [0, 1, 0, shift[1]], + [0, 0, 1, shift[2]], + [0, 0, 0, 1], + ] + ) + + os.mkdir(output_dir) + + result = pcl_merge.collapse(result, output_dir) + if metadata: + result.metadata = metadata + + return result + + +def _merge_project_objects(projects: list[ProjectObjects], **kwargs) -> ProjectObjects: + + assert len(projects) > 1 # This is guaranteed by the public merge function + + result = ProjectObjects() + + result.metadata.name = "Merged project" + result.metadata.description = "Merge of projects: " + ", ".join( + [str(project.metadata.id) for project in projects] + ) + + # This contains the mapping from the original item UUIDs to the UUID of the merged item, + uuid_mapping = {} + + def merge_by_key(key, shifts=None, sensor_id_mappings=None, **kwargs): + + result = _merge_subojects_by_key( + key, projects, shifts, sensor_id_mappings, **kwargs + ) + + if result is None: + return None + + for project in projects: + try: + for object in getattr(project, key): + uuid_mapping[object.metadata.id] = result.metadata.id + except AttributeError: + pass + + return result + + # The scene reference frames require special treatment + scene_reference_frame = merge_by_key("scene_reference_frame_objs") + if scene_reference_frame: + setattr(result, "scene_reference_frame_objs", [scene_reference_frame]) + + if scene_reference_frame is None: + raise RuntimeError("Could not obtain the scene reference frame") + + shifts = [] + for project in projects: + if project.scene_reference_frame is not None: + shifts.append( + scene_reference_frame.base_to_canonical.shift + - project.scene_reference_frame.base_to_canonical.shift + ) + else: + shifts.append(None) + sensor_id_mappings = [{} for i in range(len(projects))] + + keys = { + key + for project in projects + for key in project.__dict__ + if key != "scene_reference_frame_objs" and not key.startswith("_") + } + for key in keys: + merged_item = merge_by_key(key, shifts, sensor_id_mappings, **kwargs) + if merged_item: + setattr(result, key, [merged_item]) + + _fix_sources(result, uuid_mapping) + 
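# A small numeric sketch (hypothetical values) of the per-project shift computed above:
# the merged scene reference frame keeps the first project's base_to_canonical shift,
# and every other sub-project's coordinates are moved by the difference of shifts,
# exactly as done for projected GCPs, camera positions and point cloud nodes.
import numpy as np

merged_shift = np.array([1000.0, 2000.0, 50.0])     # base_to_canonical.shift of the merged SRS
project_b_shift = np.array([1000.0, 2000.0, 80.0])  # shift of another sub-project

shift_b = merged_shift - project_b_shift   # the value appended to `shifts` above
point_in_b = np.array([10.0, 20.0, 30.0])  # e.g. a projected GCP of sub-project B
point_in_merged = point_in_b + shift_b     # same rule as `gcp.coordinates += shift`
print(point_in_merged)                     # [10. 20.  0.]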
_verify_calibrated_control_point_consistency(projects) + + return result + + +def _merge_projects(projects: list[Project], **kwargs): + return _merge_project_objects([resolve(project) for project in projects], **kwargs) + + +def merge(first, *rest, **kwargs): + + for x in rest: + if type(first) != type(x): + raise TypeError("Objects of mixed types cannot be merged") + + objects = [first] + list(rest) + + for t, fun in [ + (Calibration, _merge_calibrations), + (CalibratedCameras, _merge_calibrated_cameras), + (CalibratedControlPoints, _merge_calibrated_control_points), + (CameraList, _merge_camera_lists), + (Constraints, _merge_constraints), + (GlTFPointCloud, _merge_point_clouds), + (GpsBias, _merge_gps_bias), + (InputCameras, _merge_input_cameras), + (InputControlPoints, _merge_input_control_points), + (Project, _merge_projects), + (ProjectObjects, _merge_project_objects), + (ProjectedControlPoints, _merge_projected_control_points), + (ProjectedInputCameras, _merge_projected_input_cameras), + (SceneReferenceFrame, _merge_scene_reference_frames), + ]: + + if type(first) == t: + return fun(objects, **kwargs) + + raise TypeError("Unknown OPF type to merge: %s" % type(first)) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Simple OPF project merging tool") + parser.add_argument( + "input", + metavar="project.json", + type=str, + nargs="+", + help="A list of OPF project files", + ) + parser.add_argument( + "outdir", type=str, help="Output directory for the merged project" + ) + parser.add_argument( + "-f", + "--force", + dest="force", + action="store_true", + default=False, + help="Do not ask for confirmation when overwriting output files", + ) + + return parser.parse_args() + + +def main(): + + args = parse_args() + + if len(args.input) == 1: + print("Only one input project was given, is the output directory missing?") + return 0 + + output_dir = args.outdir + if os.path.exists(output_dir): + if len(os.listdir(output_dir)) != 0 and not args.force: + while True: + try: + answer = input( + "The output directory is not empty do you want to procced [yN]? 
" + ) + except KeyboardInterrupt: + print() + exit(-1) + except EOFError: + answer = "" + if answer == "n" or answer == "N" or answer == "": + exit(0) + if answer == "y" or answer == "Y": + break + else: + os.makedirs(output_dir) + + projects = [load(input) for input in args.input] + + merged_project = merge(*projects, base_dir=Path(output_dir)) + + save(merged_project, output_dir + "/project.opf") diff --git a/src/opf_tools/undistort/__main__.py b/src/opf_tools/undistort/__main__.py new file mode 100644 index 0000000..f9537d0 --- /dev/null +++ b/src/opf_tools/undistort/__main__.py @@ -0,0 +1,6 @@ +import sys + +from opf_tools.undistort.undistorter import main + +if __name__ == "__main__": + sys.exit(main()) diff --git a/src/opf_tools/undistort/undistorter.py b/src/opf_tools/undistort/undistorter.py new file mode 100644 index 0000000..43a4704 --- /dev/null +++ b/src/opf_tools/undistort/undistorter.py @@ -0,0 +1,228 @@ +import argparse +import functools +import os +from urllib.parse import urlparse +from urllib.request import url2pathname + +import numpy as np +from PIL import Image + +from pyopf.cameras import CalibratedSensor, PerspectiveInternals +from pyopf.io import load +from pyopf.project import ProjectObjects +from pyopf.resolve import resolve + + +def camera_supported( + camera_uri: str, sensor: list[CalibratedSensor] +) -> tuple[bool, str]: + """Check if camera is supported, and create warning message accordingly if not.""" + supported = True + warning_message = "Warning! Image " + camera_uri + + url = urlparse(camera_uri) + if (url.hostname is not None and url.hostname != "localhost") or ( + url.scheme != "file" and url.scheme != "" + ): + # check if camera uri is supported + warning_message += " has an unsupported URI. Only relative URI references or absolute URIs referring to the localhost are supported. \ + Also only 'file' or '' url scheme are supported." + supported = False + elif len(sensor) == 0: + # check if camera has calibrated sensor + warning_message += " has no calibrated sensor." + supported = False + elif sensor[0].internals.type != "perspective": + # check if camera is perspective + warning_message += ( + " uses a unsupported camera type, only perspective cameras are supported." + ) + supported = False + warning_message += " It will be skipped." + + return supported, warning_message + + +def load_image(opf_project_folder: str, img_path: str) -> np.ndarray: + """Load the original image.""" + if not os.path.isabs(img_path): + # if relative path, make absolute + img_path = os.path.join(opf_project_folder, img_path) + img = np.asarray(Image.open(img_path)) + + return img + + +def save_image(image: np.ndarray, save_path: str) -> None: + """Save the undistorted image.""" + if not os.path.exists(os.path.dirname(save_path)): + os.makedirs(os.path.dirname(save_path)) + + Image.fromarray(image, "RGB").save(save_path) + + +@functools.lru_cache +def compute_undistort_map( + h: int, w: int, sensor_internals: PerspectiveInternals +) -> tuple[np.ndarray, np.ndarray]: + """Compute the undistortion mapping. + + Compute the uv-xy mapping. A given sensor will always follow the same mapping, so we use memoization to not recompute the same output multiple times. + The output is used in billinear interpolation, so for each pixel of undistorted image we need 4 pixel from original. These are returned uv_mapping of shape (4, h, w). + We also need the corresponding coefficients, these are returned in coeffs, also of shape (4, h, w). 
+ OpenCV implementation: https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html + """ + # get sensor internals + focal_length = sensor_internals.focal_length_px + principal_point = sensor_internals.principal_point_px + ks = np.zeros(6) + ks[0 : len(sensor_internals.radial_distortion)] = sensor_internals.radial_distortion + ps = np.zeros(2) + ps[ + 0 : len(sensor_internals.tangential_distortion) + ] = sensor_internals.tangential_distortion + + # normalized coordinates + norm_x = np.tile(np.arange(w), (h, 1)) + norm_y = np.transpose(np.tile(np.arange(h), (w, 1))) + xy = ( + np.array([norm_x - principal_point[0], norm_y - principal_point[1]]) + / focal_length + ) + # radius squared + r2 = xy[0] ** 2 + xy[1] ** 2 + # distortion model + radial_distort = (1 + ks[0] * r2 + ks[1] * r2**2 + ks[2] * r2**3) / ( + 1 + ks[3] * r2 + ks[4] * r2**2 + ks[5] * r2**3 + ) + source_xy = xy * radial_distort + source_xy[0] += 2 * ps[0] * xy[0] * xy[1] + ps[1] * (r2 + 2 * xy[0] ** 2) + source_xy[1] += 2 * ps[1] * xy[0] * xy[1] + ps[0] * (r2 + 2 * xy[1] ** 2) + + # in uv space + uv = source_xy * focal_length + principal_point.reshape(2, 1, 1) + # crop to avoid mapping to outside image + uv = np.maximum(0.5, uv) + uv[0] = np.minimum(w - 1.5, uv[0]) + uv[1] = np.minimum(h - 1.5, uv[1]) + # fixing to integer, for each uv coordinate there will be 4 close pixels, we keep their coordinates in uv_mapping + uv_max = np.ceil(uv).astype(int) + uv_max = np.maximum(0, uv_max) + uv_max[0] = np.minimum(w - 1, uv_max[0]) + uv_max[1] = np.minimum(h - 1, uv_max[1]) + uv_min = np.floor(uv).astype(int) + uv_min = np.maximum(0, uv_min) + uv_min[0] = np.minimum(w - 1, uv_min[0]) + uv_min[1] = np.minimum(h - 1, uv_min[1]) + uv_mapping = np.concatenate((uv_max, uv_min)) + + # coefficients for interpollation of each coordinate + d_uvmax = uv_max - uv + d_uvmin = uv - uv_min + coeffs = np.concatenate((d_uvmax, d_uvmin)) + + return coeffs, uv_mapping + + +def bilinear_interpolation( + img: np.ndarray, coeffs: np.ndarray, uv_mapping: np.ndarray +) -> np.ndarray: + """Compute the undistorted image using bilinear interpolation.""" + h, w = img.shape[:2] + + fxy1 = np.multiply( + coeffs[0].reshape(h, w, 1), img[uv_mapping[3], uv_mapping[2]] + ) + np.multiply(coeffs[2].reshape(h, w, 1), img[uv_mapping[3], uv_mapping[0]]) + fxy2 = np.multiply( + coeffs[0].reshape(h, w, 1), img[uv_mapping[1], uv_mapping[2]] + ) + np.multiply(coeffs[2].reshape(h, w, 1), img[uv_mapping[1], uv_mapping[0]]) + img = ( + np.multiply(coeffs[1].reshape(h, w, 1), fxy1) + + np.multiply(coeffs[3].reshape(h, w, 1), fxy2) + ).astype("uint8") + + return img + + +def undistort(project: ProjectObjects, opf_project_folder: str) -> None: + """Undistort all images of the project for which a calibrated sensor exists.""" + if ( + (project.input_cameras is None) + or (project.camera_list is None) + or (project.calibration is None) + or (project.calibration.calibrated_cameras is None) + ): + print("Project doesn't have calibrated cameras. 
Quitting.") + return + + for capture in project.input_cameras.captures: + for camera in capture.cameras: + # get camera's image uri + camera_uri = [ + temp_camera.uri + for temp_camera in project.camera_list.cameras + if temp_camera.id == camera.id + ][0] + + sensor = [ + sensor + for sensor in project.calibration.calibrated_cameras.sensors + if sensor.id == camera.sensor_id + ] + + supported, warning_message = camera_supported(camera_uri, sensor) + if not supported: + print(warning_message) + continue + else: + sensor = sensor[0] + camera_url = url2pathname(urlparse(camera_uri).path) + + # load camera image + print("Input image: ", camera_uri) + img = load_image(opf_project_folder, camera_url) + + # get sampling map (where to sample original image) + h, w = img.shape[:2] + coeffs, uv_mapping = compute_undistort_map(h, w, sensor.internals) + + # bilinear interpolation + undist_img = bilinear_interpolation(img, coeffs, uv_mapping) + + # puts them in an 'undistort' directory in their original location + save_path = os.path.join( + os.path.dirname(camera_url), "undistort", os.path.basename(camera_url) + ) + if not os.path.isabs(save_path): + # if relative path, make absolute + save_path = os.path.join(opf_project_folder, save_path) + print( + "Output image: ", + save_path, + ) + save_image(undist_img, save_path) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Undistorts the images of an OPF project. Only perspective cameras with a calibrated sensor will be undistorted. \ + The undistorted images will be stored in their original place, but in an 'undistort' directory." + ) + parser.add_argument( + "input", + type=str, + help="An OPF project file", + ) + + return parser.parse_args() + + +def main(): + args = parse_args() + opf_project = args.input + opf_project_folder = os.path.dirname(opf_project) + + project = load(opf_project) + project = resolve(project) + + undistort(project, opf_project_folder) diff --git a/src/pyopf/VersionInfo.py b/src/pyopf/VersionInfo.py new file mode 100644 index 0000000..33a0830 --- /dev/null +++ b/src/pyopf/VersionInfo.py @@ -0,0 +1,107 @@ +import functools +import re +from typing import Optional + + +@functools.total_ordering +class VersionInfo: + """ + A semver like version class without patch and build numbers + """ + + __slots__ = ("_major", "_minor", "_prerelease") + + #: Regex for a semver version + _REGEX = re.compile( + r""" + ^ + (?P0|[1-9]\d*) + \. + (?P0|[1-9]\d*) + (?:-(?P[-0-9a-zA-Z-]+))? 
+ $ + """, + re.VERBOSE, + ) + + def __init__(self, major: int, minor: int = 0, prerelease: Optional[str] = None): + if major < 0 or minor < 0: + raise ValueError("Major and minor version numbers must be positive") + + self._major = major + self._minor = minor + self._prerelease = prerelease + + @property + def major(self): + """The major part of a version (read-only).""" + return self._major + + @property + def minor(self): + """The minor part of a version (read-only).""" + return self._minor + + @property + def prerelease(self): + """The prerelease part of a version (read-only).""" + return self._prerelease + + def to_dict(self): + ret: dict[str, int | str] = {"major": self._major, "minor": self._minor} + if self._prerelease is not None: + ret["prerelease"] = self._prerelease + return ret + + def to_tuple(self): + if self._prerelease is not None: + return (self._major, self._minor, self._prerelease) + else: + return (self._major, self._minor) + + @classmethod + def parse(cls, version: str): + """ + Parse version string to a VersionInfo instance. + + :param version: version string + :return: a :class:`VersionInfo` instance + :raises: :class:`ValueError` + :rtype: :class:`VersionInfo` + """ + match = cls._REGEX.match(version) + if match is None: + raise ValueError("%s is not valid version string" % version) + + version_parts = match.groupdict() + + major = int(version_parts["major"]) + minor = int(version_parts["minor"]) + prerelease = version_parts.get("prerelease", None) + + return cls(major, minor, prerelease) + + def __eq__(self, other): + return self.to_tuple() == other.to_tuple() + + def __lt__(self, other): + us = self.to_tuple() + them = other.to_tuple() + if len(us) == len(them): + return us < them + else: + return us[0:2] < them[0:2] or (us[0:2] == them[0:2] and len(us) > len(them)) + + def __repr__(self): + s = ", ".join("%s=%r" % (key, val) for key, val in self.to_dict().items()) + return "%s(%s)" % (type(self).__name__, s) + + def __str__(self): + """str(self)""" + version = "%d.%d" % (self.major, self.minor) + if self.prerelease: + version += "-%s" % self.prerelease + return version + + def __hash__(self): + return hash(str(self)) diff --git a/src/pyopf/cameras/__init__.py b/src/pyopf/cameras/__init__.py new file mode 100644 index 0000000..947498f --- /dev/null +++ b/src/pyopf/cameras/__init__.py @@ -0,0 +1,44 @@ +from .calibrated_cameras import ( + CalibratedCamera, + CalibratedCameras, + CalibratedRigRelatives, + CalibratedSensor, +) +from .camera_list import CameraData, CameraList +from .gps_bias import GpsBias +from .input_cameras import ( + BandInformation, + Camera, + Capture, + DynamicPixelRange, + Geolocation, + InputCameras, + ModelSource, + OpkOrientation, + PixelRange, + PixelType, + RigModelSource, + Sensor, + ShutterType, + StaticPixelRange, + YprOrientation, +) +from .input_rig_relatives import ( + InputRigRelatives, + RigRelativeRotation, + RigRelativeTranslation, +) +from .projected_input_cameras import ( + ProjectedCapture, + ProjectedGeolocation, + ProjectedInputCameras, + ProjectedOrientation, + ProjectedRigTranslation, + ProjectedSensor, +) +from .sensor_internals import ( + FisheyeInternals, + Internals, + PerspectiveInternals, + SphericalInternals, +) diff --git a/src/pyopf/cameras/calibrated_cameras.py b/src/pyopf/cameras/calibrated_cameras.py new file mode 100644 index 0000000..1be550f --- /dev/null +++ b/src/pyopf/cameras/calibrated_cameras.py @@ -0,0 +1,215 @@ +from typing import Any, Dict, List, Optional + +import numpy as np + +from ..formats 
import CoreFormat +from ..items import BaseItem +from ..types import OpfObject, VersionInfo +from ..uid64 import Uid64 +from ..util import ( + from_float, + from_list, + from_none, + from_str, + from_union, + to_class, + to_float, + vector_from_list, +) +from ..versions import FormatVersion, format_and_version_to_type +from .sensor_internals import ( + FisheyeInternals, + Internals, + PerspectiveInternals, + SphericalInternals, +) + + +class CalibratedCamera(OpfObject): + id: Uid64 + """Unique ID of the camera, it must appear in the input cameras.""" + orientation_deg: np.ndarray + """Calibrated Omega-Phi-Kappa angles in degree representing a rotation R_x(ω)R_y(ϕ)R_z(κ) + from the image CS to the processing CRS. + """ + position: np.ndarray + """Calibrated coordinates in the processing CRS.""" + rolling_shutter: Optional[np.ndarray] + """Refer to [this + document](https://s3.amazonaws.com/mics.pix4d.com/KB/documents/isprs_rolling_shutter_paper_final_2016.pdf). + """ + sensor_id: Uid64 + """Unique ID of the sensor used by this camera.""" + + def __init__( + self, + id: Uid64, + sensor_id: Uid64, + orientation_deg: np.ndarray, + position: np.ndarray, + rolling_shutter: Optional[np.ndarray] = None, + ) -> None: + super(CalibratedCamera, self).__init__() + self.id = id + self.orientation_deg = orientation_deg + self.position = position + self.rolling_shutter = rolling_shutter + self.sensor_id = sensor_id + + @staticmethod + def from_dict(obj: Any) -> "CalibratedCamera": + assert isinstance(obj, dict) + id = Uid64(int=int(obj.get("id"))) + orientation_deg = vector_from_list(obj.get("orientation_deg"), 3, 3) + position = vector_from_list(obj.get("position"), 3, 3) + rolling_shutter = from_union( + [lambda x: vector_from_list(x, 3, 3), from_none], obj.get("rolling_shutter") + ) + sensor_id = Uid64(int=int(obj.get("sensor_id"))) + result = CalibratedCamera( + id, sensor_id, orientation_deg, position, rolling_shutter + ) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(CalibratedCamera, self).to_dict() + result["id"] = self.id.int + result["orientation_deg"] = from_list(to_float, self.orientation_deg) + result["position"] = from_list(to_float, self.position) + if self.rolling_shutter is not None: + result["rolling_shutter"] = from_union( + [lambda x: from_list(to_float, x), from_none], self.rolling_shutter + ) + result["sensor_id"] = self.sensor_id.int + return result + + +class CalibratedRigRelatives(OpfObject): + """Calibrated rig relatives contain the optimised relative translations and rotations in + processing CRS units. 
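# A hedged round-trip sketch of the dict serialization convention used by the classes in
# this file (UIDs stored as integers, 3-vectors as plain lists), using the
# CalibratedCamera class defined above. The import path is taken from the cameras
# __init__.py added in this change, and the values are illustrative only.
from pyopf.cameras import CalibratedCamera

camera_dict = {
    "id": 1,
    "sensor_id": 2,
    "orientation_deg": [0.0, 0.0, 90.0],  # omega, phi, kappa in degrees
    "position": [100.0, 200.0, 300.0],    # processing CRS coordinates
}
camera = CalibratedCamera.from_dict(camera_dict)
print(camera.to_dict()["position"])       # [100.0, 200.0, 300.0]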
+ """ + + """Euler angles in degree (see convention [here](auxiliary_objects.md#rig-relatives))""" + rotation_angles_deg: np.ndarray + """Relative translation in processing CRS units""" + translation: np.ndarray + + def __init__( + self, + rotation_angles_deg: np.ndarray, + translation: np.ndarray, + ) -> None: + super(CalibratedRigRelatives, self).__init__() + self.rotation_angles_deg = rotation_angles_deg + self.translation = translation + + @staticmethod + def from_dict(obj: Any) -> "CalibratedRigRelatives": + assert isinstance(obj, dict) + rotation_angles_deg = vector_from_list(obj.get("rotation_angles_deg"), 3, 3) + translation = vector_from_list(obj.get("translation"), 3, 3) + result = CalibratedRigRelatives(rotation_angles_deg, translation) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(CalibratedRigRelatives, self).to_dict() + result["rotation_angles_deg"] = from_list(to_float, self.rotation_angles_deg) + result["translation"] = from_list(to_float, self.translation) + return result + + +class CalibratedSensor(OpfObject): + """Unique ID of the sensor, it must appear in the input cameras.""" + + id: Uid64 + """Calibrated sensor internal parameters.""" + internals: Internals + rig_relatives: Optional[CalibratedRigRelatives] + + def __init__( + self, + id: Uid64, + internals: Internals, + rig_relatives: Optional[CalibratedRigRelatives] = None, + ) -> None: + super(CalibratedSensor, self).__init__() + self.id = id + self.internals = internals + self.rig_relatives = rig_relatives + + @staticmethod + def from_dict(obj: Any) -> "CalibratedSensor": + assert isinstance(obj, dict) + id = Uid64(int=int(obj.get("id"))) + internals = from_union( + [ + SphericalInternals.from_dict, + PerspectiveInternals.from_dict, + FisheyeInternals.from_dict, + ], + obj.get("internals"), + ) + rig_relatives = from_union( + [CalibratedRigRelatives.from_dict, from_none], obj.get("rig_relatives") + ) + result = CalibratedSensor(id, internals, rig_relatives) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(CalibratedSensor, self).to_dict() + result["id"] = self.id.int + result["internals"] = to_class(Internals, self.internals) + if self.rig_relatives is not None: + result["rig_relatives"] = from_union( + [lambda x: to_class(CalibratedRigRelatives, x), from_none], + self.rig_relatives, + ) + return result + + +class CalibratedCameras(BaseItem): + """Definition of Calibrated Camera Parameters""" + + """Calibrated camera parameters.""" + cameras: List[CalibratedCamera] + """Calibrated sensor parameters.""" + sensors: List[CalibratedSensor] + + def __init__( + self, + cameras: List[CalibratedCamera], + sensors: List[CalibratedSensor], + format: CoreFormat = CoreFormat.CALIBRATED_CAMERAS, + version: VersionInfo = FormatVersion.CALIBRATED_CAMERAS, + ) -> None: + super(CalibratedCameras, self).__init__(format=format, version=version) + + self.cameras = cameras + self.sensors = sensors + + @staticmethod + def from_dict(obj: Any) -> "CalibratedCameras": + base = BaseItem.from_dict(obj) + cameras = from_list(CalibratedCamera.from_dict, obj.get("cameras")) + sensors = from_list(CalibratedSensor.from_dict, obj.get("sensors")) + result = CalibratedCameras(cameras, sensors, base.format, base.version) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result: dict = super(CalibratedCameras, self).to_dict() + result["cameras"] = 
from_list( + lambda x: to_class(CalibratedCamera, x), self.cameras + ) + result["sensors"] = from_list( + lambda x: to_class(CalibratedSensor, x), self.sensors + ) + return result + + +format_and_version_to_type[ + (CoreFormat.CALIBRATED_CAMERAS, FormatVersion.CALIBRATED_CAMERAS) +] = CalibratedCameras diff --git a/src/pyopf/cameras/camera_list.py b/src/pyopf/cameras/camera_list.py new file mode 100644 index 0000000..eec35c8 --- /dev/null +++ b/src/pyopf/cameras/camera_list.py @@ -0,0 +1,77 @@ +from typing import Any, List + +from ..formats import CoreFormat +from ..items import BaseItem +from ..types import OpfObject, VersionInfo +from ..uid64 import Uid64 +from ..util import from_list, from_str, to_class +from ..versions import FormatVersion, format_and_version_to_type + + +class CameraData(OpfObject): + """All data files associated with one camera in a capture""" + + id: Uid64 + + uri: str + """The location of the image file given as a URI-reference. For multi-page images, only one + page shall be considered. By default, that page will be the first one unless the URI + refers to a different one in a fragment part with the syntax "page=n", e.g. + "image.tiff#page=1". + """ + + def __init__(self, id: Uid64, uri: str) -> None: + super(CameraData, self).__init__() + self.id = id + self.uri = uri + + @staticmethod + def from_dict(obj: Any) -> "CameraData": + assert isinstance(obj, dict) + id = Uid64(int=int(obj.get("id"))) + uri = from_str(obj.get("uri")) + result = CameraData(id, uri) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(CameraData, self).to_dict() + result["id"] = self.id.int + result["uri"] = from_str(self.uri) + return result + + +class CameraList(BaseItem): + """List of primitive camera data files""" + + cameras: List[CameraData] + """List of all cameras in all captures.""" + + def __init__( + self, + cameras: List[CameraData], + format: CoreFormat = CoreFormat.CAMERA_LIST, + version: VersionInfo = FormatVersion.CAMERA_LIST, + ) -> None: + super(CameraList, self).__init__(format=format, version=version) + + assert self.format == CoreFormat.CAMERA_LIST + self.cameras = cameras + + @staticmethod + def from_dict(obj: Any) -> "CameraList": + base = BaseItem.from_dict(obj) + cameras = from_list(CameraData.from_dict, obj.get("cameras")) + result = CameraList(cameras, base.format, base.version) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(CameraList, self).to_dict() + result["cameras"] = from_list(lambda x: to_class(CameraData, x), self.cameras) + return result + + +format_and_version_to_type[ + (CoreFormat.CAMERA_LIST, FormatVersion.CAMERA_LIST) +] = CameraList diff --git a/src/pyopf/cameras/gps_bias.py b/src/pyopf/cameras/gps_bias.py new file mode 100644 index 0000000..c850c1e --- /dev/null +++ b/src/pyopf/cameras/gps_bias.py @@ -0,0 +1,86 @@ +from typing import Any, Dict, List, Optional + +import numpy as np + +from ..formats import CoreFormat +from ..items import BaseItem +from ..types import OpfObject, VersionInfo +from ..uid64 import Uid64 +from ..util import from_float, from_list, to_class, to_float, vector_from_list +from ..versions import FormatVersion, format_and_version_to_type + + +class RigidTransformationWithScaling(OpfObject): + """Rigid transform + + Definition of a rigid transformation with rotation, translation, and scaling. Transforms + input points p to output points p' via p' = scale * rotation * p + translation. 
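# A small numeric sketch of the GPS bias transform p' = scale * rotation * p + translation
# described below. The rotation is kept as the identity here (rotation_deg = [0, 0, 0]) to
# avoid committing to the exact Euler-angle convention; all values are illustrative only.
import numpy as np

scale = 2.0
rotation = np.eye(3)
translation = np.array([10.0, 0.0, -5.0])

p = np.array([1.0, 2.0, 3.0])               # GCP-adjusted camera output position
p_gps = scale * rotation @ p + translation  # prior camera GPS position
print(p_gps)                                # [12.  4.  1.]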
+ """ + + rotation_deg: np.ndarray + """Rotation as Euler angles in degree (see convention for camera rig-relative rotations)""" + scale: float + """Scale""" + translation: np.ndarray + """Translation in units of the processing CRS.""" + + def __init__( + self, rotation_deg: np.ndarray, scale: float, translation: np.ndarray + ) -> None: + self.rotation_deg = rotation_deg + self.scale = scale + self.translation = translation + + @staticmethod + def from_dict(obj: Any) -> "RigidTransformationWithScaling": + assert isinstance(obj, dict) + rotation_deg = vector_from_list(obj.get("rotation_deg"), 3, 3) + scale = from_float(obj.get("scale")) + translation = vector_from_list(obj.get("translation"), 3, 3) + result = RigidTransformationWithScaling(rotation_deg, scale, translation) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(RigidTransformationWithScaling, self).to_dict() + result["rotation_deg"] = from_list(to_float, self.rotation_deg) + result["scale"] = to_float(self.scale) + result["translation"] = from_list(to_float, self.translation) + return result + + +class GpsBias(BaseItem): + """For projects processed with both camera GPS and GCPs, the GPS bias describes a transform + from the (GCP-adjusted) camera output positions to the prior camera GPS positions. For an + output camera point p, a camera GPS point p' is computed as p' = RigidTransformation(p). + Note that both the GPS and camera positions are in the processing CRS. A GPS bias is a + rigid transformation with rotation, translation, and scaling. + """ + + transform: RigidTransformationWithScaling + """Rigid transform""" + + def __init__( + self, + transform: RigidTransformationWithScaling, + format: CoreFormat = CoreFormat.GPS_BIAS, + version: VersionInfo = FormatVersion.GPS_BIAS, + ) -> None: + super(GpsBias, self).__init__(format=format, version=version) + self.transform = transform + + @staticmethod + def from_dict(obj: Any) -> "GpsBias": + base = BaseItem.from_dict(obj) + transform = RigidTransformationWithScaling.from_dict(obj.get("transform")) + result = GpsBias(transform, base.format, base.version) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result: dict = super(GpsBias, self).to_dict() + result["transform"] = to_class(RigidTransformationWithScaling, self.transform) + return result + + +format_and_version_to_type[(CoreFormat.GPS_BIAS, FormatVersion.GPS_BIAS)] = GpsBias diff --git a/src/pyopf/cameras/input_cameras.py b/src/pyopf/cameras/input_cameras.py new file mode 100644 index 0000000..82b6890 --- /dev/null +++ b/src/pyopf/cameras/input_cameras.py @@ -0,0 +1,574 @@ +from datetime import datetime +from enum import Enum +from typing import Any, Dict, List, Optional, Union + +import dateutil.parser +import numpy as np + +from ..crs import Geolocation +from ..ext.pix4d_input_depth_map import Pix4dInputDepthMap +from ..formats import CoreFormat +from ..items import BaseItem +from ..types import OpfObject, VersionInfo +from ..uid64 import Uid64 +from ..util import ( + from_float, + from_int, + from_list, + from_none, + from_str, + from_union, + to_class, + to_enum, + to_float, + to_int, + vector_from_list, +) +from ..versions import FormatVersion, format_and_version_to_type +from .input_rig_relatives import InputRigRelatives +from .sensor_internals import ( + FisheyeInternals, + Internals, + PerspectiveInternals, + SphericalInternals, +) + + +class ModelSource(str, Enum): + DATABASE = "database" + 
GENERIC = "generic" + GENERIC_FROM_EXIF = "generic_from_exif" + USER = "user" + + +class StaticPixelRange(OpfObject): + """Defines the range of valid pixel values. Values ≤ min are considered underexposed and + ≥ max overexposed. Can be a [static range](#pixel-range) or a [dynamic + range](#dynamic-pixel-range). + + Static pixel data range given by a minimum and maximum. + """ + + max: float + """Maximum pixel value.""" + min: float + """Minimum pixel value.""" + + def __init__( + self, + min: float, + max: float, + ) -> None: + super(StaticPixelRange, self).__init__() + self.max = max + self.min = min + + @staticmethod + def from_dict(obj: Any) -> "StaticPixelRange": + assert isinstance(obj, dict) + max = from_float(obj.get("max")) + min = from_float(obj.get("min")) + result = StaticPixelRange(min, max) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(StaticPixelRange, self).to_dict() + result["max"] = to_float(self.max) + result["min"] = to_float(self.min) + return result + + +class DynamicPixelRange(OpfObject): + """Defines the range of valid pixel values. Values ≤ min are considered underexposed and + ≥ max overexposed. Can be a [static range](#pixel-range) or a [dynamic + range](#dynamic-pixel-range). + + Dynamically inferred pixel range. The range needs to be derived from the data by looking + at the image content, filtering extreme values at both ends with the given percentile. + """ + + percentile: float + """Percentage of values ignored on both ends of the ordered list of values when computing + the min/max. It must be a positive value and 0 means nothing is ignored. + """ + + def __init__( + self, + percentile: float, + ) -> None: + super(DynamicPixelRange, self).__init__() + self.percentile = percentile + + @staticmethod + def from_dict(obj: Any) -> "DynamicPixelRange": + assert isinstance(obj, dict) + percentile = from_float(obj.get("percentile")) + result = DynamicPixelRange(percentile) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(DynamicPixelRange, self).to_dict() + result["percentile"] = to_float(self.percentile) + return result + + +PixelRange = StaticPixelRange | DynamicPixelRange + + +class PixelType(str, Enum): + FLOAT = "float" + UINT12 = "uint12" + UINT16 = "uint16" + UINT8 = "uint8" + + +class Camera(OpfObject): + """One camera in a capture. It is associated to a sensor via a sensor identifier.""" + + id: Uid64 + image_orientation: Optional[int] + """Specifies the image orientation following [EXIF, page + 37](https://www.jeita.or.jp/japanese/standard/book/CP-3451E_E/#target/page_no=38). 1: no + rotation, no mirror, 2: mirror horizontal, 3: rotate 180 degrees, 4: mirror vertical, 5: + mirror horizontal and rotate 270 degrees CW, 6: rotate 90 degrees CW, 7: mirror + horizontal and rotate 90 degrees CW, 8: rotate 270 degrees CW. + """ + model_source: ModelSource + pixel_range: PixelRange + """Defines the range of valid pixel values. Values ≤ min are considered underexposed and + ≥ max overexposed. Can be a StaticPixelRange or a DynamicPixelRange. 
+ """ + pixel_type: PixelType + sensor_id: Uid64 + input_depth_map: Optional[Pix4dInputDepthMap] + + def __init__( + self, + id: Uid64, + model_source: ModelSource, + pixel_range: PixelRange, + pixel_type: PixelType, + sensor_id: Uid64, + image_orientation: Optional[int] = None, + input_depth_map: Optional[Pix4dInputDepthMap] = None, + ) -> None: + super(Camera, self).__init__() + self.id = id + self.image_orientation = image_orientation + self.model_source = model_source + self.pixel_range = pixel_range + self.pixel_type = pixel_type + self.sensor_id = sensor_id + self.input_depth_map = input_depth_map + + @staticmethod + def from_dict(obj: Any) -> "Camera": + assert isinstance(obj, dict) + id = Uid64(int=int(obj.get("id"))) + image_orientation = from_union( + [from_int, from_none], obj.get("image_orientation") + ) + model_source = ModelSource(obj.get("model_source")) + pixel_range = from_union( + [StaticPixelRange.from_dict, DynamicPixelRange.from_dict], + obj.get("pixel_range"), + ) + pixel_type = PixelType(obj.get("pixel_type")) + sensor_id = Uid64(int=int(obj.get("sensor_id"))) + result = Camera( + id, + model_source, + pixel_range, + pixel_type, + sensor_id, + image_orientation, + ) + result._extract_unknown_properties_and_extensions(obj) + result.input_depth_map = result._extract_known_extension(Pix4dInputDepthMap) + return result + + def to_dict(self) -> dict: + result = super(Camera, self).to_dict(self.input_depth_map) + result["id"] = self.id.int + if self.image_orientation is not None: + result["image_orientation"] = from_union( + [from_int, from_none], self.image_orientation + ) + result["model_source"] = to_enum(ModelSource, self.model_source) + result["pixel_range"] = to_class(PixelRange, self.pixel_range) + result["pixel_type"] = to_enum(PixelType, self.pixel_type) + result["sensor_id"] = self.sensor_id.int + return result + + +class YprOrientation(OpfObject): + """Camera orientation as Yaw-Pitch-Roll + + Yaw-Pitch-Roll angles represent a rotation R_z(yaw)R_y(pitch)R_x(roll) from the image CS + to navigation CRS base change, where the image CS is right-top-back in image space, the + navigation CRS is East-North-Down and angles (0, 0, 0) represent the identity + transformation. + """ + + angles_deg: np.ndarray + """Yaw, pitch, roll angles in degrees.""" + sigmas_deg: np.ndarray + """Error estimation (standard deviation) in degrees.""" + type = "yaw_pitch_roll" + + def __init__( + self, + angles_deg: np.ndarray, + sigmas_deg: np.ndarray, + ) -> None: + super(YprOrientation, self).__init__() + self.angles_deg = angles_deg + self.sigmas_deg = sigmas_deg + + @staticmethod + def from_dict(obj: Any) -> "YprOrientation": + assert isinstance(obj, dict) + assert obj.get("type") == YprOrientation.type + + angles_deg = vector_from_list(obj.get("angles_deg"), 3, 3) + sigmas_deg = vector_from_list(obj.get("sigmas_deg"), 3, 3) + result = YprOrientation(angles_deg, sigmas_deg) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(YprOrientation, self).to_dict() + result["angles_deg"] = from_list(to_float, self.angles_deg) + result["sigmas_deg"] = from_list(to_float, self.sigmas_deg) + result["type"] = from_str(self.type) + return result + + +class OpkOrientation(OpfObject): + """Camera orientation as Omega-Phi-Kappa. + + Omega-Phi-Kappa represent a rotation R_x(ω)R_y(ϕ)R_z(κ) from the image CS to a separately + defined Cartesian CRS, where the image CS is right-top-back in image space. 
+ """ + + angles_deg: np.ndarray + """Omega, phi, kappa angles in degrees.""" + sigmas_deg: np.ndarray + """Error estimation (standard deviation) in degrees.""" + crs: str + """The target CRS of the rotation. A Cartesian horizontal CRS as WKT2 string or `"Auth:code"`.""" + + type = "omega_phi_kappa" + + def __init__( + self, + angles_deg: np.ndarray, + sigmas_deg: np.ndarray, + crs: str, + ) -> None: + super(OpkOrientation, self).__init__() + self.angles_deg = angles_deg + self.sigmas_deg = sigmas_deg + self.crs = crs + + @staticmethod + def from_dict(obj: Any) -> "OpkOrientation": + assert isinstance(obj, dict) + assert obj.get("type") == OpkOrientation.type + + angles_deg = vector_from_list(obj.get("angles_deg"), 3, 3) + sigmas_deg = vector_from_list(obj.get("sigmas_deg"), 3, 3) + crs = from_str(obj.get("crs")) + + result = OpkOrientation(angles_deg, sigmas_deg, crs) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(OpkOrientation, self).to_dict() + result["angles_deg"] = from_list(to_float, self.angles_deg) + result["sigmas_deg"] = from_list(to_float, self.sigmas_deg) + result["type"] = from_str(self.type) + result["crs"] = from_str(self.crs) + return result + + +Orientation = YprOrientation | OpkOrientation + + +class RigModelSource(Enum): + DATABASE = "database" + GENERIC = "generic" + NOT_APPLICABLE = "not_applicable" + USER = "user" + + +class Capture(OpfObject): + """A collection of cameras and spatio-temporal information of an image acquisition event.""" + + cameras: List[Camera] + """List of cameras in the capture.""" + geolocation: Optional[Geolocation] + height_above_takeoff_m: Optional[float] + """Height above the take-off place in meters.""" + id: Uid64 + orientation: Optional[Orientation] + """One of YprOrientation or OpkOrientation""" + reference_camera_id: Uid64 + """ID of the reference camera in a rig. Required also for single camera capture.""" + rig_model_source: RigModelSource + time: datetime + """The time of image acquisition formatted as [ISO + 8601](https://en.wikipedia.org/wiki/ISO_8601). If the timezone is known then the time + should be specified as UTC, if no timezone is given then it is unknown. 
+ """ + + def __init__( + self, + id: Uid64, + cameras: List[Camera], + height_above_takeoff_m: Optional[float], + reference_camera_id: Uid64, + rig_model_source: RigModelSource, + time: datetime, + geolocation: Optional[Geolocation] = None, + orientation: Optional[Orientation] = None, + ) -> None: + super(Capture, self).__init__() + self.cameras = cameras + self.geolocation = geolocation + self.height_above_takeoff_m = height_above_takeoff_m + self.id = id + self.orientation = orientation + self.reference_camera_id = reference_camera_id + self.rig_model_source = rig_model_source + self.time = time + + @staticmethod + def from_dict(obj: Any) -> "Capture": + assert isinstance(obj, dict) + cameras = from_list(Camera.from_dict, obj.get("cameras")) + geolocation = from_union( + [Geolocation.from_dict, from_none], obj.get("geolocation") + ) + height_above_takeoff_m = from_union( + [from_float, from_none], obj.get("height_above_takeoff_m") + ) + id = Uid64(int=int(obj.get("id"))) + orientation = from_union( + [YprOrientation.from_dict, OpkOrientation.from_dict, from_none], + obj.get("orientation"), + ) + reference_camera_id = Uid64(int=int(obj.get("reference_camera_id"))) + rig_model_source = RigModelSource(obj.get("rig_model_source")) + time = dateutil.parser.isoparse(str(obj.get("time"))) + result = Capture( + id, + cameras, + height_above_takeoff_m, + reference_camera_id, + rig_model_source, + time, + geolocation, + orientation, + ) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(Capture, self).to_dict() + result["cameras"] = from_list(lambda x: to_class(Camera, x), self.cameras) + if self.geolocation is not None: + result["geolocation"] = from_union( + [lambda x: to_class(Geolocation, x), from_none], self.geolocation + ) + if self.height_above_takeoff_m is not None: + result["height_above_takeoff_m"] = from_union( + [to_float, from_none], self.height_above_takeoff_m + ) + result["id"] = self.id.int + if self.orientation is not None: + result["orientation"] = from_union( + [lambda x: to_class(Orientation, x), from_none], self.orientation + ) + result["reference_camera_id"] = self.reference_camera_id.int + result["rig_model_source"] = to_enum(RigModelSource, self.rig_model_source) + result["time"] = self.time.isoformat() + return result + + +class BandInformation(OpfObject): + """Information about a band""" + + name: Optional[str] + weight: float + """Weights to compute a luminance representation of the image""" + + def __init__( + self, + weight: float, + name: Optional[str] = None, + ) -> None: + super(BandInformation, self).__init__() + self.name = name + assert 0 <= weight <= 1 + self.weight = weight + + @staticmethod + def from_dict(obj: Any) -> "BandInformation": + assert isinstance(obj, dict) + name = from_union([from_str, from_none], obj.get("name")) + weight = from_float(obj.get("weight")) + result = BandInformation(weight, name) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(BandInformation, self).to_dict() + if self.name is not None: + result["name"] = from_union([from_str, from_none], self.name) + result["weight"] = to_float(self.weight) + return result + + +class ShutterType(Enum): + GLOBAL = "global" + ROLLING = "rolling" + + +class Sensor(OpfObject): + """Specifies one sensor model of a physical camera unit, described by lens type, general + parameters and internal optical parameters. + """ + + """Image band properties. 
The number of items must be equal to the channel count. For + example, an RGB image has the three bands `"Red", "Green", "Blue"`. The weights of all + bands must be greater than or equal to 0 and sum to 1. + """ + bands: List[BandInformation] + id: Uid64 + image_size_px: np.ndarray # 2D vector + """Image width and height in pixels.""" + internals: Internals + name: str + """Sensor name.""" + pixel_size_um: float + """Pixel size in micrometers.""" + rig_relatives: Optional[InputRigRelatives] + shutter_type: ShutterType + + def __init__( + self, + id: Uid64, + name: str, + bands: List[BandInformation], + image_size_px: np.ndarray, + internals: Internals, + pixel_size_um: float, + shutter_type: ShutterType, + rig_relatives: Optional[InputRigRelatives] = None, + ) -> None: + super(Sensor, self).__init__() + self.bands = bands + self.id = id + self.image_size_px = image_size_px + self.internals = internals + self.name = name + self.pixel_size_um = pixel_size_um + self.rig_relatives = rig_relatives + self.shutter_type = shutter_type + + @staticmethod + def from_dict(obj: Any) -> "Sensor": + assert isinstance(obj, dict) + bands = from_list(BandInformation.from_dict, obj.get("bands")) + id = Uid64(int=int(obj.get("id"))) + image_size_px = vector_from_list(obj.get("image_size_px"), 2, 2, dtype=int) + internals = from_union( + [ + SphericalInternals.from_dict, + PerspectiveInternals.from_dict, + FisheyeInternals.from_dict, + ], + obj.get("internals"), + ) + name = from_str(obj.get("name")) + pixel_size_um = from_float(obj.get("pixel_size_um")) + rig_relatives = from_union( + [InputRigRelatives.from_dict, from_none], obj.get("rig_relatives") + ) + shutter_type = ShutterType(obj.get("shutter_type")) + result = Sensor( + id, + name, + bands, + image_size_px, + internals, + pixel_size_um, + shutter_type, + rig_relatives, + ) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(Sensor, self).to_dict() + result["bands"] = from_list(lambda x: to_class(BandInformation, x), self.bands) + result["id"] = self.id.int + result["image_size_px"] = from_list(to_int, self.image_size_px) + result["internals"] = to_class(Internals, self.internals) + result["name"] = from_str(self.name) + result["pixel_size_um"] = to_float(self.pixel_size_um) + if self.rig_relatives is not None: + result["rig_relatives"] = from_union( + [lambda x: to_class(InputRigRelatives, x), from_none], + self.rig_relatives, + ) + result["shutter_type"] = to_enum(ShutterType, self.shutter_type) + return result + + +class InputCameras(BaseItem): + """Definition of the input cameras, i.e. the data as provided by the user and camera + database. 
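# A hedged sketch of using the band weights described above to build a luminance image:
# the weights are non-negative and sum to 1, so luminance is the weighted sum over
# channels. The RGB weights and the random image are illustrative values only.
import numpy as np

band_weights = [0.2126, 0.7152, 0.0722]           # e.g. "Red", "Green", "Blue"
assert abs(sum(band_weights) - 1.0) < 1e-9

rgb = np.random.default_rng(0).random((4, 4, 3))  # H x W x channels
luminance = np.tensordot(rgb, np.array(band_weights), axes=([2], [0]))
print(luminance.shape)                            # (4, 4)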
+ """ + + captures: List[Capture] + """List of input captures.""" + sensors: List[Sensor] + """List of input sensors.""" + + def __init__( + self, + captures: List[Capture], + sensors: List[Sensor], + format: CoreFormat = CoreFormat.INPUT_CAMERAS, + version: VersionInfo = FormatVersion.INPUT_CAMERAS, + ) -> None: + super(InputCameras, self).__init__(format=format, version=version) + + self.captures = captures + self.sensors = sensors + + @staticmethod + def from_dict(obj: Any) -> "InputCameras": + base = BaseItem.from_dict(obj) + + captures = from_list(Capture.from_dict, obj.get("captures")) + sensors = from_list(Sensor.from_dict, obj.get("sensors")) + result = InputCameras(captures, sensors, base.format, base.version) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result: dict = super(InputCameras, self).to_dict() + result["captures"] = from_list(lambda x: to_class(Capture, x), self.captures) + result["sensors"] = from_list(lambda x: to_class(Sensor, x), self.sensors) + return result + + +format_and_version_to_type[ + (CoreFormat.INPUT_CAMERAS, FormatVersion.INPUT_CAMERAS) +] = InputCameras diff --git a/src/pyopf/cameras/input_rig_relatives.py b/src/pyopf/cameras/input_rig_relatives.py new file mode 100644 index 0000000..52492db --- /dev/null +++ b/src/pyopf/cameras/input_rig_relatives.py @@ -0,0 +1,104 @@ +from typing import Any + +import numpy as np + +from ..types import OpfObject +from ..util import from_list, to_class, to_float, vector_from_list + + +class RigRelativeRotation(OpfObject): + """Input camera rig rotation relative to the reference camera.""" + + angles_deg: np.ndarray # 3D vector + """Euler angles in degree (see convention [here](auxiliary_objects.md#rig-relatives)).""" + sigmas_deg: np.ndarray # 3D vector + """Measurement error (standard deviation) in degrees.""" + + def __init__( + self, + angles_deg: np.ndarray, + sigmas_deg: np.ndarray, + ) -> None: + super(RigRelativeRotation, self).__init__() + self.angles_deg = angles_deg + self.sigmas_deg = sigmas_deg + + @staticmethod + def from_dict(obj: Any) -> "RigRelativeRotation": + assert isinstance(obj, dict) + angles_deg = vector_from_list(obj.get("angles_deg"), 3, 3) + sigmas_deg = vector_from_list(obj.get("sigmas_deg"), 3, 3) + result = RigRelativeRotation(angles_deg, sigmas_deg) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(RigRelativeRotation, self).to_dict() + result["angles_deg"] = from_list(to_float, self.angles_deg) + result["sigmas_deg"] = from_list(to_float, self.sigmas_deg) + return result + + +class RigRelativeTranslation(OpfObject): + """Input camera rig translation relative to the reference camera.""" + + sigmas_m: np.ndarray + """Measurement error (standard deviation) in meters.""" + values_m: np.ndarray + """Relative translation of the secondary sensor in the image CS of the reference sensor in + meters. 
+ """ + + def __init__(self, sigmas_m: np.ndarray, values_m: np.ndarray) -> None: + super(RigRelativeTranslation, self).__init__() + self.sigmas_m = sigmas_m + self.values_m = values_m + + @staticmethod + def from_dict(obj: Any) -> "RigRelativeTranslation": + assert isinstance(obj, dict) + sigmas_m = vector_from_list(obj.get("sigmas_m"), 3, 3) + values_m = vector_from_list(obj.get("values_m"), 3, 3) + result = RigRelativeTranslation(sigmas_m, values_m) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(RigRelativeTranslation, self).to_dict() + result["sigmas_m"] = from_list(to_float, self.sigmas_m) + result["values_m"] = from_list(to_float, self.values_m) + return result + + +class InputRigRelatives(OpfObject): + """Input rig relatives contain the a priori knowledge about the relative translation and + rotation of secondary cameras. Since these values are supposedly coming from a sensor + database, the units are always meters and degrees. + """ + + rotation: RigRelativeRotation + translation: RigRelativeTranslation + + def __init__( + self, + rotation: RigRelativeRotation, + translation: RigRelativeTranslation, + ) -> None: + super(InputRigRelatives, self).__init__() + self.rotation = rotation + self.translation = translation + + @staticmethod + def from_dict(obj: Any) -> "InputRigRelatives": + assert isinstance(obj, dict) + rotation = RigRelativeRotation.from_dict(obj.get("rotation")) + translation = RigRelativeTranslation.from_dict(obj.get("translation")) + result = InputRigRelatives(rotation, translation) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(InputRigRelatives, self).to_dict() + result["rotation"] = to_class(RigRelativeRotation, self.rotation) + result["translation"] = to_class(RigRelativeTranslation, self.translation) + return result diff --git a/src/pyopf/cameras/projected_input_cameras.py b/src/pyopf/cameras/projected_input_cameras.py new file mode 100644 index 0000000..9f725ba --- /dev/null +++ b/src/pyopf/cameras/projected_input_cameras.py @@ -0,0 +1,260 @@ +from typing import Any, List, Optional + +import numpy as np + +from ..formats import CoreFormat +from ..items import BaseItem +from ..types import OpfObject, VersionInfo +from ..uid64 import Uid64 +from ..util import ( + from_list, + from_none, + from_union, + to_class, + to_float, + vector_from_list, +) +from ..versions import FormatVersion, format_and_version_to_type + + +class ProjectedGeolocation(OpfObject): + """Input geolocation in the processing CRS axes and units.""" + + position: np.ndarray + """Coordinates in the processing CRS.""" + sigmas: np.ndarray + """Standard deviation in the processing CRS units.""" + + def __init__( + self, + position: np.ndarray, + sigmas: np.ndarray, + ) -> None: + super(ProjectedGeolocation, self).__init__() + self.position = position + self.sigmas = sigmas + + @staticmethod + def from_dict(obj: Any) -> "ProjectedGeolocation": + assert isinstance(obj, dict) + position = vector_from_list(obj.get("position"), 3, 3) + sigmas = vector_from_list(obj.get("sigmas"), 3, 3) + result = ProjectedGeolocation(position, sigmas) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(ProjectedGeolocation, self).to_dict() + result["position"] = from_list(to_float, self.position) + result["sigmas"] = from_list(to_float, self.sigmas) + return result + + +class ProjectedOrientation(OpfObject): 
+ """Input orientation in the processing CRS axes.""" + + angles_deg: np.ndarray + """Omega-Phi-Kappa angles in degree representing a rotation R_x(ω)R_y(ϕ)R_z(κ) from the + image CS to the processing CRS. + """ + sigmas_deg: np.ndarray + """Standard deviation of Omega-Phi-Kappa angles in degree.""" + + def __init__( + self, + angles_deg: np.ndarray, + sigmas_deg: np.ndarray, + ) -> None: + super(ProjectedOrientation, self).__init__() + self.angles_deg = angles_deg + self.sigmas_deg = sigmas_deg + + @staticmethod + def from_dict(obj: Any) -> "ProjectedOrientation": + assert isinstance(obj, dict) + angles_deg = vector_from_list(obj.get("angles_deg"), 3, 3) + sigmas_deg = vector_from_list(obj.get("sigmas_deg"), 3, 3) + result = ProjectedOrientation(angles_deg, sigmas_deg) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(ProjectedOrientation, self).to_dict() + result["angles_deg"] = from_list(to_float, self.angles_deg) + result["sigmas_deg"] = from_list(to_float, self.sigmas_deg) + return result + + +class ProjectedCapture(OpfObject): + """Processing CRS dependent parameters of a capture sensor.""" + + geolocation: Optional[ProjectedGeolocation] + """Unique identifier pointing to a capture element in the input cameras.""" + id: Uid64 + orientation: Optional[ProjectedOrientation] + + def __init__( + self, + id: Uid64, + geolocation: Optional[ProjectedGeolocation] = None, + orientation: Optional[ProjectedOrientation] = None, + ) -> None: + super(ProjectedCapture, self).__init__() + self.geolocation = geolocation + self.id = id + self.orientation = orientation + + @staticmethod + def from_dict(obj: Any) -> "ProjectedCapture": + assert isinstance(obj, dict) + geolocation = from_union( + [ProjectedGeolocation.from_dict, from_none], obj.get("geolocation") + ) + id = Uid64(int=int(obj.get("id"))) + orientation = from_union( + [ProjectedOrientation.from_dict, from_none], obj.get("orientation") + ) + result = ProjectedCapture(id, geolocation, orientation) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(ProjectedCapture, self).to_dict() + if self.geolocation is not None: + result["geolocation"] = from_union( + [lambda x: to_class(ProjectedGeolocation, x), from_none], + self.geolocation, + ) + result["id"] = self.id.int + if self.orientation is not None: + result["orientation"] = from_union( + [lambda x: to_class(ProjectedOrientation, x), from_none], + self.orientation, + ) + return result + + +class ProjectedRigTranslation(OpfObject): + """Projected rig relatives only contain the relative translation as the relative rotation + stays the same as the input. The difference between the projected rig translation and + input rig translation is that the projected translation uses units of the processing CRS. 
+ """ + + sigmas: np.ndarray + """Measurement error (standard deviation) in processing CRS units.""" + values: np.ndarray + """Relative translation in processing CRS units.""" + + def __init__( + self, + sigmas: np.ndarray, + values: np.ndarray, + ) -> None: + super(ProjectedRigTranslation, self).__init__() + self.sigmas = sigmas + self.values = values + + @staticmethod + def from_dict(obj: Any) -> "ProjectedRigTranslation": + assert isinstance(obj, dict) + sigmas = vector_from_list(obj.get("sigmas"), 3, 3) + values = vector_from_list(obj.get("values"), 3, 3) + result = ProjectedRigTranslation(sigmas, values) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(ProjectedRigTranslation, self).to_dict() + result["sigmas"] = from_list(to_float, self.sigmas) + result["values"] = from_list(to_float, self.values) + return result + + +class ProjectedSensor(OpfObject): + """Processing CRS dependent parameters of an input sensor.""" + + id: Uid64 + """Unique identifier pointing to a sensor element in the input cameras.""" + rig_translation: Optional[ProjectedRigTranslation] + + def __init__( + self, + id: Uid64, + rig_translation: Optional[ProjectedRigTranslation] = None, + ) -> None: + super(ProjectedSensor, self).__init__() + self.id = id + self.rig_translation = rig_translation + + @staticmethod + def from_dict(obj: Any) -> "ProjectedSensor": + assert isinstance(obj, dict) + id = Uid64(int=int(obj.get("id"))) + rig_translation = from_union( + [ProjectedRigTranslation.from_dict, from_none], obj.get("rig_translation") + ) + result = ProjectedSensor(id, rig_translation) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(ProjectedSensor, self).to_dict() + result["id"] = self.id.int + if self.rig_translation is not None: + result["rig_translation"] = from_union( + [lambda x: to_class(ProjectedRigTranslation, x), from_none], + self.rig_translation, + ) + return result + + +class ProjectedInputCameras(BaseItem): + """Definition of the input cameras data in the processing CRS, which is a projected + right-handed isometric CS. + """ + + captures: List[ProjectedCapture] + """Captures for which there are processing CRS dependent parameters.""" + sensors: List[ProjectedSensor] + """Sensors for which there are processing CRS dependent parameters, for example rigs. May + contain fewer elements than the sensor list from the corresponding input cameras (or none + if there are no rigs). 
+ """ + + def __init__( + self, + captures: List[ProjectedCapture], + sensors: List[ProjectedSensor], + format: CoreFormat = CoreFormat.PROJECTED_INPUT_CAMERAS, + version: VersionInfo = FormatVersion.PROJECTED_INPUT_CAMERAS, + ) -> None: + super(ProjectedInputCameras, self).__init__(format=format, version=version) + + self.captures = captures + self.sensors = sensors + + @staticmethod + def from_dict(obj: Any) -> "ProjectedInputCameras": + base = BaseItem.from_dict(obj) + + captures = from_list(ProjectedCapture.from_dict, obj.get("captures")) + sensors = from_list(ProjectedSensor.from_dict, obj.get("sensors")) + result = ProjectedInputCameras(captures, sensors, base.format, base.version) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result: dict = super(ProjectedInputCameras, self).to_dict() + + result["captures"] = from_list( + lambda x: to_class(ProjectedCapture, x), self.captures + ) + result["sensors"] = from_list( + lambda x: to_class(ProjectedSensor, x), self.sensors + ) + return result + + +format_and_version_to_type[ + (CoreFormat.PROJECTED_INPUT_CAMERAS, FormatVersion.PROJECTED_INPUT_CAMERAS) +] = ProjectedInputCameras diff --git a/src/pyopf/cameras/sensor_internals.py b/src/pyopf/cameras/sensor_internals.py new file mode 100644 index 0000000..5dcdf82 --- /dev/null +++ b/src/pyopf/cameras/sensor_internals.py @@ -0,0 +1,172 @@ +from typing import Any + +import numpy as np + +from ..types import OpfObject +from ..util import ( + from_bool, + from_float, + from_list, + from_str, + to_float, + vector_from_list, +) + + +class SphericalInternals(OpfObject): + """Parameters of the spherical camera model are described in Pix4D [knowledge base](https://support.pix4d.com/hc/en-us/articles/202559089).""" + + type = "spherical" + + principal_point_px: np.ndarray # 2D vector + """Principal point with respect to the top left corner in pixels given as `[number, number]`.""" + + def __init__( + self, + principal_point_px: np.ndarray, + ) -> None: + super(SphericalInternals, self).__init__() + self.principal_point_px = principal_point_px + + @staticmethod + def from_dict(obj: Any) -> "SphericalInternals": + assert isinstance(obj, dict) + assert obj.get("type") == SphericalInternals.type + + principal_point_px = vector_from_list(obj.get("principal_point_px"), 2, 2) + result = SphericalInternals(principal_point_px) + result._extract_unknown_properties_and_extensions(obj, ["type"]) + return result + + def to_dict(self) -> dict: + result = super(SphericalInternals, self).to_dict() + result["principal_point_px"] = from_list(to_float, self.principal_point_px) + result["type"] = from_str(self.type) + return result + + +class PerspectiveInternals(OpfObject): + """Parameters of the perspective camera model as described in Pix4D [knowledge base](https://support.pix4d.com/hc/en-us/articles/202559089#label1).""" + + principal_point_px: np.ndarray # 2D vector + """Principal point with respect to the top left corner in pixels given as `[number, number]`.""" + focal_length_px: float + """Focal length in pixels.""" + radial_distortion: np.ndarray # 3D vector + """The radial distortion coefficients (R1, R2, R3).""" + tangential_distortion: np.ndarray # 2D vector + """The tangential distortion coefficients (T1, T2).""" + + type = "perspective" + + def __init__( + self, + principal_point_px: np.ndarray, + focal_length_px: float, + radial_distortion: np.ndarray, + tangential_distortion: np.ndarray, + ) -> None: + super(PerspectiveInternals, self).__init__() 
+ self.focal_length_px = focal_length_px + self.principal_point_px = principal_point_px + self.radial_distortion = radial_distortion + self.tangential_distortion = tangential_distortion + + @staticmethod + def from_dict(obj: Any) -> "PerspectiveInternals": + assert isinstance(obj, dict) + assert obj.get("type") == PerspectiveInternals.type + + focal_length_px = from_float(obj.get("focal_length_px")) + principal_point_px = vector_from_list(obj.get("principal_point_px"), 2, 2) + radial_distortion = vector_from_list(obj.get("radial_distortion"), 3, 3) + tangential_distortion = vector_from_list(obj.get("tangential_distortion"), 2, 2) + + result = PerspectiveInternals( + principal_point_px, + focal_length_px, + radial_distortion, + tangential_distortion, + ) + result._extract_unknown_properties_and_extensions(obj, ["type"]) + return result + + def to_dict(self) -> dict: + result = super(PerspectiveInternals, self).to_dict() + result["focal_length_px"] = from_float(self.focal_length_px) + result["principal_point_px"] = from_list(to_float, self.principal_point_px) + result["radial_distortion"] = from_list(to_float, self.radial_distortion) + result["tangential_distortion"] = from_list( + to_float, self.tangential_distortion + ) + result["type"] = from_str(self.type) + return result + + +class FisheyeInternals(OpfObject): + """Parameters of the fisheye camera model as described in Pix4D [knowledge base](https://support.pix4d.com/hc/en-us/articles/202559089#label2).""" + + principal_point_px: np.ndarray # 2D vector + """Principal point with respect to the top left corner in pixels given as `[number, number]`.""" + type = "fisheye" + affine: np.ndarray # 4D vector + """Affine transformation parameters as [ c d; e f ]""" + is_p0_zero: bool + """If true, it is prior knowledge that the first polynomial coefficient is equal to zero and + should be kept zero. + """ + is_symmetric_affine: bool + """If true, it is prior knowledge that the affine matrix is symmetric (that is, c=f and + d=e=0) and should be kept symmetric. 
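+
+    For illustration only (a sketch, not part of the format; ``internals`` denotes a
+    FisheyeInternals instance), the flat ``affine`` parameters above map to a 2x2 matrix:
+
+        c, d, e, f = internals.affine
+        A = np.array([[c, d], [e, f]])  # symmetric affine means c == f and d == e == 0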
+ """ + polynomial: np.ndarray + """The coefficients of the distortion polynomial.""" + + def __init__( + self, + principal_point_px: np.ndarray, + affine: np.ndarray, + is_p0_zero: bool, + is_symmetric_affine: bool, + polynomial: np.ndarray, + ) -> None: + super(FisheyeInternals, self).__init__() + self.principal_point_px = principal_point_px + self.affine = affine + self.is_p0_zero = is_p0_zero + self.is_symmetric_affine = is_symmetric_affine + self.polynomial = polynomial + + @staticmethod + def from_dict(obj: Any) -> "FisheyeInternals": + assert isinstance(obj, dict) + assert obj.get("type") == FisheyeInternals.type + + principal_point_px = vector_from_list(obj.get("principal_point_px"), 2, 2) + affine = vector_from_list(obj.get("affine"), 4, 4) + is_p0_zero = from_bool(obj.get("is_p0_zero")) + is_symmetric_affine = from_bool(obj.get("is_symmetric_affine")) + polynomial = np.array(from_list(from_float, obj.get("polynomial"))) + + result = FisheyeInternals( + principal_point_px, + affine, + is_p0_zero, + is_symmetric_affine, + polynomial, + ) + result._extract_unknown_properties_and_extensions(obj, ["type"]) + return result + + def to_dict(self) -> dict: + result = super(FisheyeInternals, self).to_dict() + result["principal_point_px"] = from_list(to_float, self.principal_point_px) + result["affine"] = from_list(to_float, self.affine) + result["is_p0_zero"] = from_bool(self.is_p0_zero) + result["is_symmetric_affine"] = from_bool(self.is_symmetric_affine) + result["polynomial"] = from_list(to_float, self.polynomial) + result["type"] = from_str(self.type) + return result + + +Internals = FisheyeInternals | PerspectiveInternals | SphericalInternals diff --git a/src/pyopf/cps/__init__.py b/src/pyopf/cps/__init__.py new file mode 100644 index 0000000..1dd3c66 --- /dev/null +++ b/src/pyopf/cps/__init__.py @@ -0,0 +1,7 @@ +from .calibrated_control_points import ( + CalibratedControlPoint, + CalibratedControlPoints, +) +from .constraints import Constraints, OrientationConstraint, ScaleConstraint +from .input_control_points import Gcp, InputControlPoints, Mark, Mtp +from .projected_control_points import ProjectedControlPoints, ProjectedGcp diff --git a/src/pyopf/cps/calibrated_control_points.py b/src/pyopf/cps/calibrated_control_points.py new file mode 100644 index 0000000..4e53148 --- /dev/null +++ b/src/pyopf/cps/calibrated_control_points.py @@ -0,0 +1,78 @@ +from typing import Any, List + +import numpy as np + +from ..formats import CoreFormat +from ..items import BaseItem +from ..types import OpfObject, VersionInfo +from ..util import from_list, from_str, to_class, to_float, vector_from_list +from ..versions import FormatVersion, format_and_version_to_type + + +class CalibratedControlPoint(OpfObject): + """Optimized 3D position in the processing CRS.""" + + coordinates: np.ndarray + id: str + """A string identifier that matches the corresponding input control point.""" + + def __init__(self, id: str, coordinates: np.ndarray) -> None: + super(CalibratedControlPoint, self).__init__() + self.id = id + self.coordinates = coordinates + + @staticmethod + def from_dict(obj: Any) -> "CalibratedControlPoint": + assert isinstance(obj, dict) + coordinates = vector_from_list(obj.get("coordinates"), 3, 3) + id = from_str(obj.get("id")) + result = CalibratedControlPoint(id, coordinates) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(CalibratedControlPoint, self).to_dict() + result["id"] = str(self.id) + result["coordinates"] = 
from_list(to_float, self.coordinates)
+        return result
+
+
+class CalibratedControlPoints(BaseItem):
+    """Definition of calibrated control points, which are the optimised control points with
+    coordinates expressed in the processing CRS.
+    """
+
+    points: List[CalibratedControlPoint]
+    """List of calibrated control points."""
+
+    def __init__(
+        self,
+        points: List[CalibratedControlPoint],
+        format: CoreFormat = CoreFormat.CALIBRATED_CONTROL_POINTS,
+        version: VersionInfo = FormatVersion.CALIBRATED_CONTROL_POINTS,
+    ) -> None:
+        super().__init__(format=format, version=version)
+
+        assert self.format == CoreFormat.CALIBRATED_CONTROL_POINTS
+
+        self.points = points
+
+    @staticmethod
+    def from_dict(obj: Any) -> "CalibratedControlPoints":
+        base = BaseItem.from_dict(obj)
+        points = from_list(CalibratedControlPoint.from_dict, obj.get("points"))
+        result = CalibratedControlPoints(points, base.format, base.version)
+        result._extract_unknown_properties_and_extensions(obj)
+        return result
+
+    def to_dict(self) -> dict:
+        result = super(CalibratedControlPoints, self).to_dict()
+        result["points"] = from_list(
+            lambda x: to_class(CalibratedControlPoint, x), self.points
+        )
+        return result
+
+
+format_and_version_to_type[
+    (CoreFormat.CALIBRATED_CONTROL_POINTS, FormatVersion.CALIBRATED_CONTROL_POINTS)
+] = CalibratedControlPoints
diff --git a/src/pyopf/cps/constraints.py b/src/pyopf/cps/constraints.py
new file mode 100644
index 0000000..e390535
--- /dev/null
+++ b/src/pyopf/cps/constraints.py
@@ -0,0 +1,173 @@
+from typing import Any, List
+
+import numpy as np
+
+from ..formats import CoreFormat
+from ..items import BaseItem
+from ..types import OpfObject, VersionInfo
+from ..util import (
+    from_float,
+    from_list,
+    from_str,
+    to_class,
+    to_float,
+    vector_from_list,
+)
+from ..versions import FormatVersion, format_and_version_to_type
+
+
+class OrientationConstraint(OpfObject):
+    """Constraint aligning the direction between two control points with a given unit vector."""
+
+    id: str
+    """A unique string that identifies the constraint."""
+    id_from: str
+    """A string identifier that matches the corresponding input control point."""
+    id_to: str
+    """A string identifier that matches the corresponding input control point."""
+    sigma_deg: float
+    """Accuracy of the alignment expressed as the angle between the unit_vector and the to-from
+    vector in degrees.
+    """
+    unit_vector: np.ndarray  # 3D vector
+    """Direction in which the to-from vector has to point given as a unit vector in the
+    processing CRS.
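+
+    For illustration only (a sketch, not part of the format; ``p_from`` and ``p_to`` are
+    assumed 3D positions of the two control points), the constrained quantity is the angle
+    between this unit vector and the normalized to-from vector:
+
+        d = p_to - p_from
+        angle_deg = np.degrees(np.arccos(np.clip(np.dot(unit_vector, d / np.linalg.norm(d)), -1.0, 1.0)))
+        # the calibration is expected to keep angle_deg within roughly sigma_deg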
+ """ + + def __init__( + self, + id: str, + id_from: str, + id_to: str, + unit_vector: np.ndarray, + sigma_deg: float, + ) -> None: + super(OrientationConstraint, self).__init__() + self.id = id + self.id_from = id_from + self.id_to = id_to + self.sigma_deg = sigma_deg + self.unit_vector = unit_vector + + @staticmethod + def from_dict(obj: Any) -> "OrientationConstraint": + assert isinstance(obj, dict) + id = from_str(obj.get("id")) + id_from = from_str(obj.get("id_from")) + id_to = from_str(obj.get("id_to")) + sigma_deg = from_float(obj.get("sigma_deg")) + unit_vector = vector_from_list(obj.get("unit_vector"), 3, 3) + result = OrientationConstraint(id, id_from, id_to, unit_vector, sigma_deg) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result: dict = super(OrientationConstraint, self).to_dict() + result["id"] = from_str(self.id) + result["id_from"] = from_str(self.id_from) + result["id_to"] = from_str(self.id_to) + result["sigma_deg"] = to_float(self.sigma_deg) + result["unit_vector"] = from_list(to_float, self.unit_vector) + return result + + +class ScaleConstraint(OpfObject): + """Distance between the two control points in the processing CRS.""" + + id: str + distance: float + """A unique string that identifies the constraint.""" + id_from: str + """A string identifier that matches the correspondent input control point.""" + id_to: str + """A string identifier that matches the correspondent input control point.""" + sigma: float + """Distance accuracy in the processing CRS.""" + + def __init__( + self, + id: str, + id_from: str, + id_to: str, + distance: float, + sigma: float, + ) -> None: + super(ScaleConstraint, self).__init__() + self.distance = distance + self.id = id + self.id_from = id_from + self.id_to = id_to + self.sigma = sigma + + @staticmethod + def from_dict(obj: Any) -> "ScaleConstraint": + assert isinstance(obj, dict) + distance = from_float(obj.get("distance")) + id = from_str(obj.get("id")) + id_from = from_str(obj.get("id_from")) + id_to = from_str(obj.get("id_to")) + sigma = from_float(obj.get("sigma")) + result = ScaleConstraint(id, id_from, id_to, distance, sigma) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(ScaleConstraint, self).to_dict() + result["distance"] = to_float(self.distance) + result["id"] = from_str(self.id) + result["id_from"] = from_str(self.id_from) + result["id_to"] = from_str(self.id_to) + result["sigma"] = to_float(self.sigma) + return result + + +class Constraints(BaseItem): + """Scale and orientation constraints""" + + orientation_constraints: List[OrientationConstraint] + """List of orientation constraints.""" + scale_constraints: List[ScaleConstraint] + """List of scale constraints.""" + + def __init__( + self, + orientation_constraints: List[OrientationConstraint], + scale_constraints: List[ScaleConstraint], + format: CoreFormat = CoreFormat.CONSTRAINTS, + version: VersionInfo = FormatVersion.CONSTRAINTS, + ) -> None: + super(Constraints, self).__init__(format=format, version=version) + + assert self.format == CoreFormat.CONSTRAINTS + self.orientation_constraints = orientation_constraints + self.scale_constraints = scale_constraints + + @staticmethod + def from_dict(obj: Any) -> "Constraints": + base = BaseItem.from_dict(obj) + + orientation_constraints = from_list( + OrientationConstraint.from_dict, obj.get("orientation_constraints") + ) + scale_constraints = from_list( + ScaleConstraint.from_dict, 
obj.get("scale_constraints") + ) + result = Constraints( + orientation_constraints, scale_constraints, base.format, base.version + ) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(Constraints, self).to_dict() + result["orientation_constraints"] = from_list( + lambda x: to_class(OrientationConstraint, x), self.orientation_constraints + ) + result["scale_constraints"] = from_list( + lambda x: to_class(ScaleConstraint, x), self.scale_constraints + ) + return result + + +format_and_version_to_type[ + (CoreFormat.CONSTRAINTS, FormatVersion.CONSTRAINTS) +] = Constraints diff --git a/src/pyopf/cps/input_control_points.py b/src/pyopf/cps/input_control_points.py new file mode 100644 index 0000000..9687f82 --- /dev/null +++ b/src/pyopf/cps/input_control_points.py @@ -0,0 +1,187 @@ +from typing import Any, Dict, List, Optional + +import numpy as np + +from ..crs import Geolocation +from ..formats import CoreFormat +from ..items import BaseItem +from ..types import OpfObject, VersionInfo +from ..uid64 import Uid64 +from ..util import ( + from_bool, + from_float, + from_list, + from_none, + from_str, + from_union, + to_class, + to_float, + vector_from_list, +) +from ..versions import FormatVersion, format_and_version_to_type + + +class Mark(OpfObject): + """2D image mark""" + + accuracy: float + """A number representing the accuracy of the click, it is used by the calibration algorithm + to estimate the position error of the mark. + """ + camera_id: Uid64 + position_px: np.ndarray # vector of size 2 + """(x,y) pixel location, (0,0) is the center of the top left pixel.""" + + def __init__( + self, + accuracy: float, + camera_id: Uid64, + position_px: np.ndarray, + ) -> None: + super(Mark, self).__init__() + self.accuracy = accuracy + self.camera_id = Uid64(int=int(camera_id)) + self.position_px = position_px + + @staticmethod + def from_dict(obj: Any) -> "Mark": + assert isinstance(obj, dict) + accuracy = from_float(obj.get("accuracy")) + camera_id = Uid64(int=int(obj.get("camera_id"))) + position_px = vector_from_list(obj.get("position_px"), 2, 2) + result = Mark(accuracy, camera_id, position_px) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(Mark, self).to_dict() + result["accuracy"] = to_float(self.accuracy) + result["camera_id"] = self.camera_id.int + result["position_px"] = from_list(to_float, self.position_px) + return result + + +class Gcp(OpfObject): + geolocation: Geolocation + id: str + """A unique string that identifies the GCP.""" + is_checkpoint: bool + """If true, the GCP is used only to measure the quality of the calibration results and it + does not affect it. 
+ """ + marks: List[Mark] + """List of marks in the images that correspond to the projections of a 3D point.""" + + def __init__( + self, + id: str, + geolocation: Geolocation, + is_checkpoint: bool, + marks: List[Mark], + ) -> None: + super(Gcp, self).__init__() + self.geolocation = geolocation + self.id = id + self.is_checkpoint = is_checkpoint + self.marks = marks + + @staticmethod + def from_dict(obj: Any) -> "Gcp": + assert isinstance(obj, dict) + geolocation = Geolocation.from_dict(obj.get("geolocation")) + id = from_str(obj.get("id")) + is_checkpoint = from_bool(obj.get("is_checkpoint")) + marks = from_list(Mark.from_dict, obj.get("marks")) + result = Gcp(id, geolocation, is_checkpoint, marks) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(Gcp, self).to_dict() + result["geolocation"] = to_class(Geolocation, self.geolocation) + result["id"] = from_str(self.id) + result["is_checkpoint"] = from_bool(self.is_checkpoint) + result["marks"] = from_list(lambda x: to_class(Mark, x), self.marks) + return result + + +class Mtp(OpfObject): + """A unique string that identifies the MTP.""" + + id: str + is_checkpoint: bool + """If true, the MTP is used only to measure the quality of the calibration results and it + does not affect it. + """ + marks: List[Mark] + """List of marks in the images that correspond to the projections of a 3D point.""" + + def __init__( + self, + id: str, + is_checkpoint: bool, + marks: List[Mark], + ) -> None: + super(Mtp, self).__init__() + self.id = id + self.is_checkpoint = is_checkpoint + self.marks = marks + + @staticmethod + def from_dict(obj: Any) -> "Mtp": + assert isinstance(obj, dict) + id = from_str(obj.get("id")) + is_checkpoint = from_bool(obj.get("is_checkpoint")) + marks = from_list(Mark.from_dict, obj.get("marks")) + result = Mtp(id, is_checkpoint, marks) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(Mtp, self).to_dict() + result["id"] = from_str(self.id) + result["is_checkpoint"] = from_bool(self.is_checkpoint) + result["marks"] = from_list(lambda x: to_class(Mark, x), self.marks) + return result + + +class InputControlPoints(BaseItem): + """Definition of the input control points, which are the control points in their original CRS""" + + gcps: List[Gcp] + """List of input GCPs.""" + mtps: List[Mtp] + """List of input MTPs.""" + + def __init__( + self, + gcps: List[Gcp], + mtps: List[Mtp], + format: CoreFormat = CoreFormat.INPUT_CONTROL_POINTS, + version: VersionInfo = FormatVersion.INPUT_CONTROL_POINTS, + ) -> None: + super().__init__(format=format, version=version) + + assert self.format == CoreFormat.INPUT_CONTROL_POINTS + self.gcps = gcps + self.mtps = mtps + + @staticmethod + def from_dict(obj: Any) -> "InputControlPoints": + base = BaseItem.from_dict(obj) + gcps = from_list(Gcp.from_dict, obj.get("gcps")) + mtps = from_list(Mtp.from_dict, obj.get("mtps")) + result = InputControlPoints(gcps, mtps, base.format, base.version) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(InputControlPoints, self).to_dict() + result["gcps"] = from_list(lambda x: to_class(Gcp, x), self.gcps) + result["mtps"] = from_list(lambda x: to_class(Mtp, x), self.mtps) + return result + + +format_and_version_to_type[ + (CoreFormat.INPUT_CONTROL_POINTS, FormatVersion.INPUT_CONTROL_POINTS) +] = InputControlPoints diff --git 
a/src/pyopf/cps/projected_control_points.py b/src/pyopf/cps/projected_control_points.py new file mode 100644 index 0000000..533575a --- /dev/null +++ b/src/pyopf/cps/projected_control_points.py @@ -0,0 +1,101 @@ +from typing import Any, Dict, List, Optional + +import numpy as np + +from ..formats import CoreFormat +from ..items import BaseItem +from ..types import OpfObject, VersionInfo +from ..util import ( + from_bool, + from_float, + from_list, + from_none, + from_str, + from_union, + to_class, + to_float, + vector_from_list, +) +from ..versions import FormatVersion, format_and_version_to_type + + +class ProjectedGcp(OpfObject): + """3D position in the processing CRS.""" + + coordinates: np.ndarray + id: str + """A string identifier that matches the correspondent input GCP.""" + sigmas: np.ndarray + """Standard deviation of the 3D position in processing CRS units.""" + + def __init__( + self, + id: str, + coordinates: np.ndarray, + sigmas: np.ndarray, + ) -> None: + super(ProjectedGcp, self).__init__() + self.id = id + self.coordinates = coordinates + self.sigmas = sigmas + + @staticmethod + def from_dict(obj: Any) -> "ProjectedGcp": + assert isinstance(obj, dict) + + coordinates = vector_from_list(obj.get("coordinates"), 3, 3) + sigmas = vector_from_list(obj.get("sigmas"), 3, 3) + id = from_str(obj.get("id")) + + result = ProjectedGcp(id, coordinates, sigmas) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(ProjectedGcp, self).to_dict() + result["id"] = str(self.id) + result["coordinates"] = from_list(to_float, self.coordinates) + result["sigmas"] = from_list(to_float, self.sigmas) + + return result + + +class ProjectedControlPoints(BaseItem): + """Definition of projected control points, which are the input control points with + coordinates expressed in the processing CRS + """ + + projected_gcps: List[ProjectedGcp] + """List of projected GCPs.""" + + def __init__( + self, + projected_gcps: List[ProjectedGcp], + format: CoreFormat = CoreFormat.PROJECTED_CONTROL_POINTS, + version: VersionInfo = FormatVersion.PROJECTED_CONTROL_POINTS, + ) -> None: + super().__init__(format=format, version=version) + + assert self.format == CoreFormat.PROJECTED_CONTROL_POINTS + + self.projected_gcps = projected_gcps + + @staticmethod + def from_dict(obj: Any) -> "ProjectedControlPoints": + base = BaseItem.from_dict(obj) + projected_gcps = from_list(ProjectedGcp.from_dict, obj.get("projected_gcps")) + result = ProjectedControlPoints(projected_gcps, base.format, base.version) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(ProjectedControlPoints, self).to_dict() + result["projected_gcps"] = from_list( + lambda x: to_class(ProjectedGcp, x), self.projected_gcps + ) + return result + + +format_and_version_to_type[ + (CoreFormat.PROJECTED_CONTROL_POINTS, FormatVersion.PROJECTED_CONTROL_POINTS) +] = ProjectedControlPoints diff --git a/src/pyopf/crs/__init__.py b/src/pyopf/crs/__init__.py new file mode 100644 index 0000000..8d65de5 --- /dev/null +++ b/src/pyopf/crs/__init__.py @@ -0,0 +1,6 @@ +from .crs import Crs +from .geolocation import Geolocation +from .scene_reference_frame import ( + BaseToTranslatedCanonicalCrsTransform, + SceneReferenceFrame, +) diff --git a/src/pyopf/crs/crs.py b/src/pyopf/crs/crs.py new file mode 100644 index 0000000..4a1742e --- /dev/null +++ b/src/pyopf/crs/crs.py @@ -0,0 +1,61 @@ +from typing import Any, Dict, Optional + +from ..types 
import Extensions, OpfObject +from ..util import ( + from_float, + from_none, + from_str, + from_union, + to_class, + to_float, +) + + +class Crs(OpfObject): + """Coordinate reference system""" + + """One of:
- A [WKT string version + 2](http://docs.opengeospatial.org/is/18-010r7/18-010r7.html).
- A string in the format + `Authority:code+code` where the first code is for a 2D CRS and the second one is for a + vertical CRS (e.g. `EPSG:4326+5773`).
- A string in the form + `Authority:code+Authority:code` where the first code is for a 2D CRS and the second one is + for a vertical CRS.
- A string in the form `Authority:code` where the code is for a 2D + or 3D CRS. + """ + definition: str + geoid_height: Optional[float] + """Constant geoid height over the underlying ellipsoid in the units of the vertical CRS axis.""" + + def __init__( + self, + definition: str, + geoid_height: Optional[float] = None, + ) -> None: + super(Crs, self).__init__() + self.definition = definition + self.geoid_height = geoid_height + + @staticmethod + def from_dict(obj: Any) -> "Crs": + assert isinstance(obj, dict) + definition = from_str(obj.get("definition")) + geoid_height = from_union([from_float, from_none], obj.get("geoid_height")) + result = Crs(definition, geoid_height) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(Crs, self).to_dict() + result["definition"] = from_str(self.definition) + if self.geoid_height is not None: + result["geoid_height"] = from_union( + [to_float, from_none], self.geoid_height + ) + return result + + def __eq__(self, other: "Crs") -> bool: + # This is a very naïve comparison, but something smarter requires pyproj + return ( + self.definition == other.definition + and self.geoid_height == other.geoid_height + ) diff --git a/src/pyopf/crs/geolocation.py b/src/pyopf/crs/geolocation.py new file mode 100644 index 0000000..2f783c8 --- /dev/null +++ b/src/pyopf/crs/geolocation.py @@ -0,0 +1,59 @@ +from typing import Any, Dict, List, Optional + +import numpy as np + +from ..types import OpfObject +from ..util import ( + from_float, + from_list, + from_none, + from_union, + to_class, + to_float, + vector_from_list, +) +from .crs import Crs + + +class Geolocation(OpfObject): + """Geolocation information""" + + coordinates: np.ndarray # 3D vector + """3D coordinates of a point using the same axis convention as declared by the CRS, i.e., + the X, Y axes are **not** always Easting-Northing. + """ + crs: Crs + sigmas: np.ndarray # 3D vector + """Standard deviation of a measured position. For geographic CRSs, all units are meters. For + Cartesian CRSs, the units are given by the 3D promoted definition of the axes (see the + specification of the coordinate reference system above for the definition of the + promotion). 
+ """ + + def __init__( + self, + coordinates: np.ndarray, + crs: Crs, + sigmas: np.ndarray, + ) -> None: + super(Geolocation, self).__init__() + self.coordinates = coordinates + self.crs = crs + self.sigmas = sigmas + + @staticmethod + def from_dict(obj: Any) -> "Geolocation": + assert isinstance(obj, dict) + coordinates = vector_from_list(obj.get("coordinates"), 3, 3) + crs = Crs.from_dict(obj.get("crs")) + sigmas = vector_from_list(obj.get("sigmas"), 3, 3) + result = Geolocation(coordinates, crs, sigmas) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result: dict = super(Geolocation, self).to_dict() + result["coordinates"] = from_list(to_float, self.coordinates) + result["crs"] = to_class(Crs, self.crs) + result["sigmas"] = from_list(to_float, self.sigmas) + return result diff --git a/src/pyopf/crs/scene_reference_frame.py b/src/pyopf/crs/scene_reference_frame.py new file mode 100644 index 0000000..cc59cd8 --- /dev/null +++ b/src/pyopf/crs/scene_reference_frame.py @@ -0,0 +1,96 @@ +from typing import Any + +import numpy as np + +from ..formats import CoreFormat +from ..items import BaseItem +from ..types import OpfObject, VersionInfo +from ..util import from_bool, from_list, to_class, to_float, vector_from_list +from ..versions import FormatVersion, format_and_version_to_type +from .crs import Crs + + +class BaseToTranslatedCanonicalCrsTransform(OpfObject): + """Per axis scaling factors to make the base CRS isometric.""" + + scale: np.ndarray # array of size 3 + shift: np.ndarray # array of size 3 + """Translation from the canonical CRS to a recentered reference frame suitable for + processing and visualization. + """ + swap_xy: bool + """true if and only if the base CRS is left-handed.""" + + def __init__( + self, + scale: np.ndarray, + shift: np.ndarray, + swap_xy: bool, + ) -> None: + self.scale = scale + super(BaseToTranslatedCanonicalCrsTransform, self).__init__() + self.shift = shift + self.swap_xy = swap_xy + + @staticmethod + def from_dict(obj: Any) -> "BaseToTranslatedCanonicalCrsTransform": + assert isinstance(obj, dict) + scale = vector_from_list(obj.get("scale"), 3, 3) + shift = vector_from_list(obj.get("shift"), 3, 3) + swap_xy = from_bool(obj.get("swap_xy")) + result = BaseToTranslatedCanonicalCrsTransform(scale, shift, swap_xy) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(BaseToTranslatedCanonicalCrsTransform, self).to_dict() + result["scale"] = from_list(to_float, self.scale) + result["shift"] = from_list(to_float, self.shift) + result["swap_xy"] = from_bool(self.swap_xy) + return result + + +class SceneReferenceFrame(BaseItem): + """An object that specifies a base Cartesian CRS and the transformation parameters to a + translated canonical form suitable for processing and visualization. 
+ """ + + base_to_canonical: BaseToTranslatedCanonicalCrsTransform + crs: Crs + + def __init__( + self, + base_to_canonical: BaseToTranslatedCanonicalCrsTransform, + crs: Crs, + format: CoreFormat = CoreFormat.SCENE_REFERENCE_FRAME, + version: VersionInfo = FormatVersion.SCENE_REFERENCE_FRAME, + ) -> None: + super(SceneReferenceFrame, self).__init__(format=format, version=version) + + assert self.format == CoreFormat.SCENE_REFERENCE_FRAME + self.base_to_canonical = base_to_canonical + self.crs = crs + + @staticmethod + def from_dict(obj: Any) -> "SceneReferenceFrame": + base = BaseItem.from_dict(obj) + base_to_canonical = BaseToTranslatedCanonicalCrsTransform.from_dict( + obj.get("base_to_canonical") + ) + crs = Crs.from_dict(obj.get("crs")) + result = SceneReferenceFrame(base_to_canonical, crs, base.format, base.version) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(SceneReferenceFrame, self).to_dict() + result["base_to_canonical"] = to_class( + BaseToTranslatedCanonicalCrsTransform, self.base_to_canonical + ) + result["crs"] = to_class(Crs, self.crs) + return result + + +format_and_version_to_type[ + (CoreFormat.SCENE_REFERENCE_FRAME, FormatVersion.SCENE_REFERENCE_FRAME) +] = SceneReferenceFrame diff --git a/src/pyopf/ext/__init__.py b/src/pyopf/ext/__init__.py new file mode 100644 index 0000000..1757249 --- /dev/null +++ b/src/pyopf/ext/__init__.py @@ -0,0 +1,4 @@ +from .pix4d_region_of_interest import Pix4DRegionOfInterest +from .pix4d_region_of_interest import format as region_of_interest_format +from .pix4d_region_of_interest import version as region_of_interest_version +from .plane import Plane diff --git a/src/pyopf/ext/pix4d_input_depth_map.py b/src/pyopf/ext/pix4d_input_depth_map.py new file mode 100644 index 0000000..5154879 --- /dev/null +++ b/src/pyopf/ext/pix4d_input_depth_map.py @@ -0,0 +1,113 @@ +from typing import Any, Dict, List, Optional, Union + +from ..items import ExtensionItem +from ..types import OpfPropertyExtObject +from ..uid64 import Uid64 +from ..util import ( + from_float, + from_none, + from_str, + from_union, + from_version_info, + to_class, + to_float, +) +from ..versions import VersionInfo + +_version = VersionInfo(1, 0, "draft2") + + +class DepthMapConfidence(OpfPropertyExtObject): + """A confidence map indicates the level of confidence of the depth measurements. If present, + it must be of the same dimension as the depth map. Valid confidence values range from a + `min` (lowest confidence) to a `max` (highest confidence). 
+ """ + + """The confidence map UID in the camera list.""" + id: Uid64 + """Maximum confidence value to consider a depth measurement valid.""" + max: float + """Minimum confidence value to consider a depth measurement valid.""" + min: float + + def __init__(self, id: Uid64, max: float, min: float) -> None: + super(DepthMapConfidence, self).__init__() + self.id = id + self.max = max + self.min = min + + @staticmethod + def from_dict(obj: Any) -> "DepthMapConfidence": + assert isinstance(obj, dict) + id = Uid64(int=obj.get("id")) + max = from_float(obj.get("max")) + min = from_float(obj.get("min")) + result = DepthMapConfidence(id, max, min) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(DepthMapConfidence, self).to_dict() + result["id"] = self.id.int + result["max"] = to_float(self.max) + result["min"] = to_float(self.min) + return result + + +class Pix4dInputDepthMap(OpfPropertyExtObject): + """Reference to depth information for input cameras, for example for RGB-D type sensors. In + a depth map, each pixel represents the estimated distance from the device to its + environment on the camera depth axis. A depth map image is aligned with an RGB image but + it may have a different resolution. An optional confidence map may be provided as well. + """ + + """The depth map UID in the camera list.""" + id: Uid64 + """Multiply this scale factor with depth maps values in order to obtain values in meters. + For example, if the depth map values represent millimeters the scale factor is 0.001 + (e.g. a value of 1000mm corresponds to 1m). If not specified, defaults to 1. + """ + unit_to_meters: Optional[float] + confidence: Optional[DepthMapConfidence] + extension_name = "PIX4D_input_depth_map" + + def __init__( + self, + id: Uid64, + unit_to_meters: Optional[float], + confidence: Optional[DepthMapConfidence], + version=_version, + ) -> None: + self.id = id + self.unit_to_meters = unit_to_meters + self.confidence = confidence + self.version = version + + @staticmethod + def from_dict(obj: Any) -> "Pix4dInputDepthMap": + assert isinstance(obj, dict) + confidence = from_union( + [DepthMapConfidence.from_dict, from_none], obj.get("confidence") + ) + + id = Uid64(obj.get("id")) + unit_to_meters = from_union([from_float, from_none], obj.get("unit_to_meters")) + version = from_union([from_version_info, VersionInfo.parse], obj.get("version")) + result = Pix4dInputDepthMap(id, unit_to_meters, confidence, version) + result._extract_unknown_properties_and_extensions(obj) + + return result + + def to_dict(self) -> dict: + result: dict = {} + if self.confidence is not None: + result["confidence"] = from_union( + [lambda x: to_class(DepthMapConfidence, x), from_none], self.confidence + ) + result["id"] = self.id.int + if self.unit_to_meters is not None: + result["unit_to_meters"] = from_union( + [to_float, from_none], self.unit_to_meters + ) + result["version"] = str(self.version) + return result diff --git a/src/pyopf/ext/pix4d_region_of_interest.py b/src/pyopf/ext/pix4d_region_of_interest.py new file mode 100644 index 0000000..bbe1d00 --- /dev/null +++ b/src/pyopf/ext/pix4d_region_of_interest.py @@ -0,0 +1,57 @@ +from typing import Any, Optional + +from ..formats import ExtensionFormat +from ..items import ExtensionItem +from ..util import from_float, from_none, from_union, to_class, to_float +from ..versions import VersionInfo, format_and_version_to_type +from .plane import Plane + +format = 
ExtensionFormat("application/ext-pix4d-region-of-interest+json") +version = VersionInfo(1, 0, "draft1") + + +class Pix4DRegionOfInterest(ExtensionItem): + + """Definition of a region of interest: a planar polygon with holes and an optional + height, defined as a the distance from the plane in the normal direction. All the + points on the hemispace where the normal lies that project inside the polygon and is at a + distance less than the height of the ROI, is considered to be within. + """ + + plane: Plane + """The height of the ROI volume, defined as a limit distance from the plane in the normal + direction. If not specified, the height is assumed to be infinite. + """ + height: Optional[float] + + def __init__( + self, + plane: Plane, + height: Optional[float], + format: ExtensionFormat = format, + version: VersionInfo = version, + ) -> None: + super(Pix4DRegionOfInterest, self).__init__(format=format, version=version) + + assert self.format == format + self.plane = plane + self.height = height + + @staticmethod + def from_dict(obj: Any) -> "Pix4DRegionOfInterest": + base = ExtensionItem.from_dict(obj) + plane = Plane.from_dict(obj.get("plane")) + height = from_union([from_float, from_none], obj.get("height")) + result = Pix4DRegionOfInterest(plane, height, base.format, base.version) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(Pix4DRegionOfInterest, self).to_dict() + result["plane"] = to_class(Plane, self.plane) + if self.height is not None: + result["height"] = from_union([to_float, from_none], self.height) + return result + + +format_and_version_to_type[(format, version)] = Pix4DRegionOfInterest diff --git a/src/pyopf/ext/plane.py b/src/pyopf/ext/plane.py new file mode 100644 index 0000000..f00e2f3 --- /dev/null +++ b/src/pyopf/ext/plane.py @@ -0,0 +1,70 @@ +from typing import Any, List, Optional + +import numpy as np + +from ..formats import ExtensionFormat +from ..items import ExtensionItem +from ..util import ( + IntType, + from_int, + from_list, + from_none, + from_union, + to_float, + vector_from_list, +) +from ..versions import VersionInfo + +format = ExtensionFormat("application/ext-pix4d-planes+json") +version = VersionInfo(1, 0, "draft1") + + +class Plane(ExtensionItem): + """List of inner boundaries.""" + + inner_boundaries: Optional[List[List[IntType]]] + """Plane normal direction.""" + normal_vector: np.ndarray + """List of indices in the 3D vertices array.""" + outer_boundary: List[IntType] + """List of 3D vertices.""" + vertices3d: List[np.ndarray] + + def __init__( + self, + vertices3d: List[np.ndarray], + normal_vector: np.ndarray, + outer_boundary: List[IntType], + inner_boundaries: Optional[List[List[IntType]]] = None, + ) -> None: + super(Plane, self).__init__(format=format, version=version) + self.vertices3d = vertices3d + self.normal_vector = normal_vector + self.outer_boundary = outer_boundary + self.inner_boundaries = inner_boundaries + + @staticmethod + def from_dict(obj: Any) -> "Plane": + assert isinstance(obj, dict) + inner_boundaries = from_union( + [lambda x: from_list(lambda x: from_list(from_int, x), x), from_none], + obj.get("inner_boundaries"), + ) + normal_vector = vector_from_list(obj.get("normal_vector")) + outer_boundary = from_list(from_int, obj.get("outer_boundary")) + vertices3d = from_list(lambda x: vector_from_list(x), obj.get("vertices3d")) + return Plane(vertices3d, normal_vector, outer_boundary, inner_boundaries) + + def to_dict(self) -> dict: + result: dict = {} + 
if self.inner_boundaries is not None: + result["inner_boundaries"] = from_union( + [lambda x: from_list(lambda x: from_list(from_int, x), x), from_none], + self.inner_boundaries, + ) + result["normal_vector"] = from_list(to_float, self.normal_vector) + result["outer_boundary"] = from_list(from_int, self.outer_boundary) + result["vertices3d"] = from_list( + lambda x: from_list(to_float, x), self.vertices3d + ) + return result diff --git a/src/pyopf/formats.py b/src/pyopf/formats.py new file mode 100644 index 0000000..1265ff8 --- /dev/null +++ b/src/pyopf/formats.py @@ -0,0 +1,85 @@ +from enum import Enum +from types import DynamicClassAttribute +from typing import Any, Dict, Optional + +from .util import from_union + + +class CoreFormat(str, Enum): + @DynamicClassAttribute + def name(self): + return self.value + + CALIBRATED_CAMERAS = "application/opf-calibrated-cameras+json" + CALIBRATED_CONTROL_POINTS = "application/opf-calibrated-control-points+json" + CAMERA_LIST = "application/opf-camera-list+json" + CONSTRAINTS = "application/opf-constraints+json" + GLTF_MODEL = "model/gltf+json" + GLTF_BUFFER = "application/gltf-buffer+bin" + GPS_BIAS = "application/opf-gps-bias+json" + INPUT_CAMERAS = "application/opf-input-cameras+json" + INPUT_CONTROL_POINTS = "application/opf-input-control-points+json" + PROJECTED_CONTROL_POINTS = "application/opf-projected-control-points+json" + PROJECTED_INPUT_CAMERAS = "application/opf-projected-input-cameras+json" + PROJECT = "application/opf-project+json" + SCENE_REFERENCE_FRAME = "application/opf-scene-reference-frame+json" + + +class NamedFormat(str): + name: str + + def __init__(self, name: str): + self.name = name + + def __str__(self): + return self.name + + def __repr__(self): + return '%s("%s")' % (self.__class__.__name__, self.name) + + def __hash__(self): + return hash(self.name) + + def __eq__(self, other: Any): + if isinstance(other, self.__class__): + return self.name == other.name + else: + return False + + @property + def value(self): + return self.name + + +class ExtensionFormat(NamedFormat): + """A extension string formatted as "application/ext-vendor-extension_name+format""" + + def __init__(self, name: str): + prefix = "application/ext-" + assert name[: len(prefix)] == prefix + super().__init__(name) + + +class UnknownFormat(NamedFormat): + def __init__(self, name: str): + super().__init__(name) + + +Format = CoreFormat | ExtensionFormat | UnknownFormat +Extensions = Optional[Dict[str, Dict[str, Any]]] + + +def format_from_str(x: Any) -> Format: + return from_union([CoreFormat, ExtensionFormat, UnknownFormat], x) + + +def format_to_str(x: Format) -> str: + if isinstance(x, CoreFormat): + return x.value + else: + return x.name + + +def from_format(x: Format) -> Format: + assert isinstance(x, Format) + return x diff --git a/src/pyopf/io/__init__.py b/src/pyopf/io/__init__.py new file mode 100644 index 0000000..eeafeed --- /dev/null +++ b/src/pyopf/io/__init__.py @@ -0,0 +1,2 @@ +from .loaders import UnsupportedResource, load +from .savers import save diff --git a/src/pyopf/io/loaders.py b/src/pyopf/io/loaders.py new file mode 100644 index 0000000..c85729c --- /dev/null +++ b/src/pyopf/io/loaders.py @@ -0,0 +1,147 @@ +import json +from pathlib import Path +from typing import Any, Optional +from urllib.parse import unquote, urljoin, urlparse +from urllib.request import url2pathname + +from ..formats import CoreFormat, format_from_str +from ..pointcloud.pcl import GlTFPointCloud +from ..project import ProjectResource +from ..types import 
VersionInfo +from ..versions import format_and_version_to_type + + +def join_uris(uri: str, base_uri: Optional[str]) -> Path: + """Resolve a URI relative to an absolute base URI if the input URI + is a relative URI reference, otherwise return the URI unmodified. + """ + if base_uri is not None: + uri = urljoin(base_uri + "/", uri) + + url = urlparse(uri) + if url.hostname is not None and url.hostname != "localhost": + raise ValueError( + "Only relative URI references or absolute URIs" + " referring to the localhost are supported" + ) + + if url.scheme == "file" or url.scheme == "": + return Path(url2pathname(url.path)) + + raise RuntimeError("Non-file URIs are not supported") + + +def _load_from_json(uri: Path) -> Any: + with open(str(uri)) as f: + try: + d = json.load(f) + except json.decoder.JSONDecodeError as e: + raise RuntimeError("Error parsing JSON resource %s: %s" % (uri, e)) from e + + try: + format = format_from_str(d["format"]) + version = VersionInfo.parse(d["version"]) + except KeyError: + raise RuntimeError("Input file is not a valid OPF JSON resource") + + try: + cls = format_and_version_to_type[(format, version)] + except KeyError: + raise RuntimeError( + f"Unsupported resource format and version: {format}, {version}" + ) + + try: + object = cls.from_dict(d) + if format == CoreFormat.PROJECT: + # The uri is converted to absolute based on the cwd now because we + # have no gurantee it won't be changed later and that was the path + # that was used to successfully + # open the file above. + object.base_uri = uri.resolve().parent.as_uri() + + return object + except Exception as e: + raise RuntimeError(f"Error decoding JSON resource {format}, {version}") from e + + +def _test_json_resource( + resource: str | ProjectResource, base_uri: str, _ +) -> tuple[bool, Optional[list[Any]]]: + + if isinstance(resource, str): + uri = join_uris(resource, base_uri) + else: + uri = join_uris(resource.uri, base_uri) + + if uri.suffix == ".json" or uri.suffix == ".opf": + return (True, [uri]) + return (False, None) + + +def _test_gltf_model_resource( + resource: str | ProjectResource, base_uri: str, _ +) -> tuple[bool, Optional[list[Any]]]: + + if isinstance(resource, str): + uri = join_uris(resource, base_uri) + elif resource.format == CoreFormat.GLTF_MODEL: + uri = join_uris(resource.uri, base_uri) + else: + return (False, None) + + if uri.suffix == ".gltf": + return (True, [uri]) + return (False, None) + + +def _test_gltf_binary_resource( + resource: str | ProjectResource, base_uri: str, _ +) -> tuple[bool, Optional[list[Any]]]: + + if ( + isinstance(resource, ProjectResource) + and resource.format == CoreFormat.GLTF_BUFFER + ): + return (True, []) + return (False, None) + + +loaders = [ + (_test_json_resource, _load_from_json), + (_test_gltf_model_resource, GlTFPointCloud.open), + # This is used just for skipping glTF binary buffers in the project resolver + (_test_gltf_binary_resource, lambda: None), +] +""" +A resource loader is a tuple of a test function and a loading function. +The test function must accepts a resource URI, a base URI and a list ProjectResource and returns +a tuple with a boolean, which indicates if the resource is loadable, and the list of parameters +that must be passed to the loading function which is derived from the given resources. 
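+
+A sketch of registering an additional loader pair following this convention (``my_test`` and
+``my_load`` are hypothetical callables, not part of this module):
+
+    loaders.append((my_test, my_load))
+    # my_test(resource, base_uri, additional) -> (accepted: bool, params: list | None)
+    # my_load(*params) -> the loaded object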
+"""
+
+
+class UnsupportedResource(RuntimeError):
+    def __init__(self, uri=None):
+        self.uri = uri
+
+
+def load(
+    resource: str | ProjectResource,
+    base_uri: Optional[str] = None,
+    additional_resources: Optional[list[ProjectResource]] = None,
+) -> Any:
+    """Load a resource from a URI.
+    :param resource: The resource to load, given either as a URI string or as a ProjectResource
+    :param base_uri: Base URI to use to resolve relative URI references
+    :param additional_resources: Additional resources, for resource types that are made of
+        several files not referenced by the main resource file.
+    :return: The loaded resource or None if the input URI is an auxiliary resource belonging to
+        some other primary resource
+    """
+    for test, loader in loaders:
+        accepted, params = test(resource, base_uri, additional_resources)
+        if accepted:
+            return loader(*params)
+
+    raise UnsupportedResource(resource)
diff --git a/src/pyopf/io/savers.py b/src/pyopf/io/savers.py
new file mode 100644
index 0000000..609680e
--- /dev/null
+++ b/src/pyopf/io/savers.py
@@ -0,0 +1,219 @@
+import json
+import os
+from pathlib import Path
+from typing import Any
+from urllib.parse import quote, unquote, urlparse
+
+from ..items import BaseItem, ExtensionItem
+from ..pointcloud.pcl import GlTFPointCloud
+from ..project import (
+    Calibration,
+    Project,
+    ProjectItem,
+    ProjectObjects,
+    ProjectResource,
+)
+from ..types import CoreFormat
+
+
+def to_uri_reference(path, base_path) -> str:
+    if base_path:
+        return quote(str(path.relative_to(base_path)).replace(os.sep, "/"))
+    else:
+        return path.as_uri()
+
+
+def _is_core_json_object(obj: Any):
+
+    # The plain OPF Project type is also treated as bare JSON
+    if isinstance(obj, Project):
+        return True
+
+    try:
+        return (
+            obj.format.value.endswith("+json") and obj.format != CoreFormat.GLTF_MODEL
+        )
+    except AttributeError:
+        return False
+
+
+def _save_to_json(obj: Any, path: Path) -> None:
+
+    with open(path, "w") as out_file:
+        json.dump(obj.to_dict(), out_file, indent=4)
+
+
+def _save_resource_to_json(
+    obj: Any, path: Path, base_path: str | Path | None = None, **_
+) -> list[ProjectResource]:
+
+    _save_to_json(obj, path)
+    return [ProjectResource(format=obj.format, uri=to_uri_reference(path, base_path))]
+
+
+def _save_point_cloud(
+    pcl: GlTFPointCloud,
+    output_dir: Path,
+    write_point_cloud_buffers: bool = False,
+    base_path: str | Path | None = None,
+    **_,
+) -> list[ProjectResource]:
+    if not os.path.exists(output_dir):
+        os.mkdir(output_dir)
+
+    gltf_path = output_dir / "point_cloud.gltf"
+    buffer_filepaths = pcl.write(gltf_path, save_buffers=write_point_cloud_buffers)
+
+    resources = [
+        ProjectResource(
+            format=CoreFormat.GLTF_BUFFER, uri=to_uri_reference(filepath, base_path)
+        )
+        for filepath in buffer_filepaths
+    ]
+    resources.append(
+        ProjectResource(
+            format=CoreFormat.GLTF_MODEL, uri=to_uri_reference(gltf_path, base_path)
+        )
+    )
+    return resources
+
+
+def _save_project_and_objects(
+    project_objs: ProjectObjects,
+    path: Path,
+    use_item_name_for_resource_uri: bool = False,
+    **kwargs,
+) -> None:
+
+    base_path = path.parent
+    items = []
+
+    def resource_uri_subdir(obj):
+        return (
+            obj.metadata.name
+            if use_item_name_for_resource_uri and obj.metadata.name is not None
+            else str(obj.metadata.id)
+        )
+
+    def save_subobjects(container, save_function):
+        for name, attribute in container.__dict__.items():
+            # Skipping private attributes like _metadata
+            if name.startswith("_"):
+                continue
+
+            if isinstance(attribute, list):
+                name_prefix = name[: -len("_objs")]
+                if len(attribute) == 1:
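+                    # A single object keeps the bare attribute prefix (e.g. "input_cameras"),
+                    # while multiple objects of the same kind get an index suffix
+                    # ("input_cameras_0", "input_cameras_1", ...).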
+                    save_function(name_prefix, attribute[0], base_path)
+                else:
+                    for i, obj in enumerate(attribute):
+                        save_function(f"{name_prefix}_{i}", obj, base_path)
+            else:
+                if attribute is not None:
+                    save_function(name, attribute, base_path)
+
+    def save_object(prefix, obj: BaseItem | ExtensionItem, base_path):
+
+        resources = []
+
+        if isinstance(obj, Calibration):
+
+            subdir = base_path / resource_uri_subdir(obj)
+            try:
+                os.mkdir(subdir)
+            except FileExistsError:
+                if not os.path.isdir(subdir):
+                    raise RuntimeError(
+                        f"Fatal error writing object: Path {subdir} exists, but it is not a directory"
+                    )
+
+            def save_calibration_subobject(prefix, subobject, base_path):
+                output_path = subdir
+                # For objects that are not plain JSON resources we assume they can
+                # decide the file names themselves, and all they need is the
+                # directory where to write.
+                if _is_core_json_object(subobject):
+                    output_path /= prefix + ".json"
+
+                resources.extend(
+                    save(subobject, output_path, base_path=base_path, **kwargs)
+                )
+
+            save_subobjects(obj, save_calibration_subobject)
+
+        elif isinstance(obj, GlTFPointCloud):
+            resources = save(
+                obj, base_path / resource_uri_subdir(obj), base_path=base_path, **kwargs
+            )
+
+        elif _is_core_json_object(obj):
+            resources = save(
+                obj, str(base_path / (prefix + ".json")), base_path=base_path
+            )
+
+        assert obj.metadata is not None
+
+        items.append(
+            ProjectItem(
+                id=obj.metadata.id,
+                type=obj.metadata.type,
+                name=obj.metadata.name,
+                labels=obj.metadata.labels,
+                resources=resources,
+                sources=obj.metadata.raw_sources(),
+            )
+        )
+
+    save_subobjects(project_objs, save_object)
+
+    # Saving the top level project
+    project = Project(
+        id=project_objs.metadata.id,
+        name=project_objs.metadata.name,
+        description=project_objs.metadata.description,
+        version=project_objs.metadata.version,
+        generator=project_objs.metadata.generator,
+        items=items,
+    )
+    _save_to_json(project, path)
+
+
+def save(obj: Any, uri: str | Path, **kwargs) -> list[ProjectResource]:
+    """Save an OPF object to the given URI.
+    :param obj: The object to save. It may be an object directly writable in JSON format, a
+        ProjectObjects object or a GlTFPointCloud
+    :param uri: The target destination
+    :param kwargs: The following parameters are accepted:
+        * write_point_cloud_buffers (bool): If True, the binary buffer files of point
+          clouds are also written when saving point clouds.
+        * use_item_name_for_resource_uri (bool): Certain items have resources that make a
+          bundle (e.g. point clouds). When saving ProjectObjects, these resources are saved
+          in a subdirectory relative to the project location. By default the UUID of the item
+          is used to name the subdirectory, unless this option is set to True, in which case
+          the item name will be used. If the item does not have a name, the UUID is used as
+          a fallback.
+        * base_path (str | Path): An optional base path used to make the URIs in the returned
+          ProjectResource list relative. This parameter is ignored if the input object
+          is of type ProjectObjects.
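+
+    A minimal usage sketch (paths and flag values are illustrative; ``project_objects`` is
+    assumed to be a ProjectObjects instance):
+
+        from pathlib import Path
+        save(project_objects, Path("out/project.opf"),
+             write_point_cloud_buffers=True,
+             use_item_name_for_resource_uri=True)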
+ :return: A list of ProjectResources + """ + + if not isinstance(uri, Path): + uri = Path(unquote(urlparse(uri).path)).absolute() + + for obj_type, saver in savers: + if isinstance(obj, obj_type): + return saver(obj, uri, **kwargs) + + if _is_core_json_object(obj): + return _save_resource_to_json(obj, uri, **kwargs) + + raise RuntimeError("Save is not implemented for this type: %s" % type(obj)) + + +savers = [ + (GlTFPointCloud, _save_point_cloud), + (ProjectObjects, _save_project_and_objects), +] +"""A object saver is registered a a tuple with made by the object type and a function with signature + `f(obj: Any, path: Path, **kwargs) -> list[ProjectResource]` as value""" diff --git a/src/pyopf/items.py b/src/pyopf/items.py new file mode 100644 index 0000000..9a9f408 --- /dev/null +++ b/src/pyopf/items.py @@ -0,0 +1,85 @@ +from typing import Any, Optional + +from .formats import ( + CoreFormat, + ExtensionFormat, + format_from_str, + format_to_str, + from_format, +) +from .types import OpfObject, OpfPropertyExtObject +from .util import from_union, from_version_info +from .VersionInfo import VersionInfo + + +class BaseItem(OpfObject): + _format: CoreFormat # This type is meant to be used only with core items + _version: VersionInfo + metadata: Optional["Metadata"] # noqa: F821 # type: ignore + + @property + def format(self): + return self._format + + @property + def version(self): + return self._version + + def __init__(self, format: CoreFormat, version: VersionInfo): + super(BaseItem, self).__init__() + self._format = format + self._version = version + + def to_dict(self) -> dict: + result = super(BaseItem, self).to_dict() + result.update( + {"format": format_to_str(self.format), "version": str(self._version)} + ) + return result + + @staticmethod + def from_dict(obj: Any) -> "BaseItem": + assert isinstance(obj, dict) + format = from_union([from_format, format_from_str], obj.get("format")) + version = from_union([from_version_info, VersionInfo.parse], obj.get("version")) + return BaseItem(format, version) + + def _extract_unknown_properties_and_extensions( + self, obj: dict, ignore_keys=set() + ) -> None: + super(BaseItem, self)._extract_unknown_properties_and_extensions( + obj, ignore_keys={"format", "version"}.union(ignore_keys) + ) + + +class ExtensionItem(OpfObject): + _format: ExtensionFormat # This type is meant to be used only with extension items + _version: VersionInfo + metadata: Optional["Metadata"] # noqa: F821 # type: ignore + + @property + def format(self): + return self._format + + @property + def version(self): + return self._version + + def __init__(self, format: ExtensionFormat, version: VersionInfo): + super(ExtensionItem, self).__init__() + self._format = format + self._version = version + + def to_dict(self) -> dict: + result = super(ExtensionItem, self).to_dict() + result.update( + {"format": format_to_str(self.format), "version": str(self._version)} + ) + return result + + @staticmethod + def from_dict(obj: Any) -> "ExtensionItem": + assert isinstance(obj, dict) + format = from_union([from_format, format_from_str], obj.get("format")) + version = from_union([from_version_info, VersionInfo.parse], obj.get("version")) + return ExtensionItem(format, version) diff --git a/src/pyopf/pointcloud/__init__.py b/src/pyopf/pointcloud/__init__.py new file mode 100644 index 0000000..68a4657 --- /dev/null +++ b/src/pyopf/pointcloud/__init__.py @@ -0,0 +1 @@ +from .pcl import GlTFPointCloud, Matches, Node, PointIndexRanges diff --git a/src/pyopf/pointcloud/merge.py 
b/src/pyopf/pointcloud/merge.py new file mode 100644 index 0000000..ce0113f --- /dev/null +++ b/src/pyopf/pointcloud/merge.py @@ -0,0 +1,247 @@ +import copy +import os +from pathlib import Path +from typing import Any, Optional + +import numpy as np + +from .pcl import ( + GlTFPointCloud, + ImagePoints, + Matches, + PointIndexRanges, + opf_axis_rotation_matrix, + opf_axis_rotation_matrix_inverse, +) +from .utils import merge_arrays + + +def _check_property(objs: list[Any], prop: str): + """Check if a property exist in all items of a list + :param objs: A list of objects where to check for the presence of the property + :param prop: The name of the property to check + + :return: True if the property is present in all objects, False if the property is not present in any of the objects + + :raise ValueError: If the property is present only in some of the objects, but not in all of them + """ + flags = [getattr(p, prop, None) is not None for p in objs] + + if any(flags) and not all(flags): + raise ValueError("Not all pointclouds share property: " + prop) + + return all(flags) + + +def _apply_affine_transform(array: np.ndarray | np.memmap, matrix: np.ndarray) -> None: + """Applies in-place the affine transform represented by matrix to the points of array. + :raise ValueError: If array does not have the shape (,3) or if matrix does not have the shape (4,4) + """ + upper_left_matrix = matrix[:3, :3] + translation = matrix[:3, 3] + array[:] = array @ upper_left_matrix.transpose() + translation + + +def _merge_image_points( + image_points: list[ImagePoints], output_gltf_dir: Path +) -> ImagePoints: + """Merge the ImagePoints data structure used as part of the OPF_mesh_primitive_matches glTF extension. + + :param image_points: A list of ImagePoints structures to merge. It is modified in-place. + :param output_gltf_dir: The output directory for the binary buffers. It is assumed to exist. + + :return: The merged ImagePoints structure. + + :raise ValueError: If the image_points list is empty. + """ + + if len(image_points) == 0: + raise ValueError("Empty image_points list") + + image_points[0].featureIds = merge_arrays( + [ip.featureIds for ip in image_points], output_gltf_dir / "matchFeatureIds.bin" + ) + image_points[0].pixelCoordinates = merge_arrays( + [ip.pixelCoordinates for ip in image_points], + output_gltf_dir / "matchPixelCoordinates.bin", + ) + image_points[0].scales = merge_arrays( + [ip.scales for ip in image_points], output_gltf_dir / "matchScales.bin" + ) + + if _check_property(image_points, "depths"): + image_points[0].depths = merge_arrays( + [ip.depths for ip in image_points if ip.depths is not None], + output_gltf_dir / "matchDepths.bin", + ) + + return image_points[0] + + +def _merge_matches(matches: list[Matches], output_gltf_dir: Path) -> Matches: + """Merge the Matches data structure used as part of the OPF_mesh_primitive_matches glTF extension. + + :param matches: A list of Matches structures to merge. It is modified in-place. + :param output_gltf_dir: The output directory for the binary buffers. It is assumed to exist. + + :return: The merged Matches structure. + + :raise ValueError: If the matches list is empty. 
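+
+    For example (sizes are illustrative): merging two Matches objects with 3 and 2
+    camera_uids respectively yields 5 camera_uids, and every camera_ids entry coming
+    from the second object is shifted by 3 so that it still refers to the same camera
+    after the merge.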
+ """ + + if len(matches) == 0: + raise ValueError("Empty matches list") + + camera_uids = [] + for m in matches: + camera_uids.extend(m.camera_uids) + + camera_ids = merge_arrays( + [m.camera_ids for m in matches], output_gltf_dir / "matchCameraIds.bin" + ) + + offset = matches[0].camera_ids.shape[0] + uid_offset = len(matches[0].camera_uids) + for m in matches[1:]: + camera_ids[offset : offset + len(m.camera_ids)] += uid_offset + offset += len(m.camera_ids) + uid_offset += len(m.camera_uids) + + new_ranges = merge_arrays( + [m.point_index_ranges.ranges for m in matches], + output_gltf_dir / "matchPointIndexRanges.bin", + ) + point_index_ranges = PointIndexRanges(new_ranges) + + offset = 0 + camera_ids_offset = 0 + for m in matches: + for i in range(len(m.point_index_ranges)): + o, c = m.point_index_ranges[i] + point_index_ranges[offset + i] = (o + camera_ids_offset, c) + offset += len(m.point_index_ranges) + camera_ids_offset += len(m.camera_ids) + + matches[0].camera_uids = camera_uids + matches[0].camera_ids = camera_ids + matches[0].point_index_ranges = point_index_ranges + + if _check_property(matches, "image_points"): + matches[0].image_points = _merge_image_points( + [m.image_points for m in matches if m.image_points is not None], + output_gltf_dir, + ) + + return matches[0] + + +def _merge_custom_attributes( + custom_attributes: list[dict[str, np.ndarray | np.memmap]], output_gltf_dir: Path +) -> Optional[dict[str, np.ndarray | np.memmap]]: + """Merge a list of custom attributes. + :param custom_attributes: A list of dictionaries, representing the custom attributes of multiple point clouds + :param output_gltf_dir: The output directory for the binary buffers. It is assumed to exist. + + :return: A dictionary mapping the custom attribute name to the numpy buffer or None if no common attributes were found + """ + + if len(custom_attributes) == 0 or len(custom_attributes[0]) == 0: + return None + + common_attributes = set.intersection( + *[set(attributes.keys()) for attributes in custom_attributes] + ) + + attributes = {} + for common_attribute in common_attributes: + arrays = [attributes[common_attribute] for attributes in custom_attributes] + merged_attribute = merge_arrays( + arrays, output_gltf_dir / (common_attribute + ".bin") + ) + attributes[common_attribute] = merged_attribute + + return attributes + + +def concatenate(pointclouds: list[GlTFPointCloud]) -> GlTFPointCloud: + """Concatenate the nodes of all point clouds in a single point cloud. + The nodes may not share the same properties. + + :param pointclouds: The list of pointclouds to concantenate + :return: A pointcloud which has as nodes all the nodes of the other pointclouds + """ + + concatenated = copy.deepcopy(pointclouds[0]) + + for pointcloud in pointclouds[1:]: + concatenated.nodes.extend(pointcloud.nodes) + + return concatenated + + +def collapse(pointcloud: GlTFPointCloud, output_gltf_dir: Path) -> GlTFPointCloud: + """Collapse all nodes in a point cloud into one. + The first node keeps its matrix. + All nodes must share the same properties, including extensions and custom attributes. + + :param pointcloud: The pointclouds whose nodes to collapse. The data is modified in place and not recommended to use after this call. + :param output_gltf_dir: The output dir for the glTF point cloud. It is assumed to exist. + + :return pointcloud: A point cloud which has only one node, containing the merged information from all its nodes. + + :raise ValueError: If only some of the nodes have some optional property present. 
+ + :raise FileNotFoundError: If output_gltf_dir does not exist. + """ + + if not os.path.exists(output_gltf_dir): + raise FileNotFoundError( + "Output directory %s does not exist " % str(output_gltf_dir) + ) + + position = merge_arrays( + [n.position for n in pointcloud.nodes], output_gltf_dir / "positions.bin" + ) + offset = 0 + + for node in pointcloud.nodes: + count = len(node.position) + matrix = node.matrix if node.matrix is not None else np.eye(4) + matrix = opf_axis_rotation_matrix_inverse @ matrix + _apply_affine_transform(position[offset : offset + count], matrix) + offset += count + + pointcloud.nodes[0].position = position + pointcloud.nodes[0].matrix = opf_axis_rotation_matrix + + if _check_property(pointcloud.nodes, "color"): + pointcloud.nodes[0].color = merge_arrays( + [n.color for n in pointcloud.nodes if n.color is not None], + output_gltf_dir / "colors.bin", + ) + + if _check_property(pointcloud.nodes, "normal"): + pointcloud.nodes[0].normal = merge_arrays( + [n.normal for n in pointcloud.nodes if n.normal is not None], + output_gltf_dir / "normals.bin", + ) + + if _check_property(pointcloud.nodes, "matches"): + pointcloud.nodes[0].matches = _merge_matches( + [n.matches for n in pointcloud.nodes if n.matches is not None], + output_gltf_dir, + ) + + if _check_property(pointcloud.nodes, "custom_attributes"): + pointcloud.nodes[0].custom_attributes = _merge_custom_attributes( + [ + n.custom_attributes + for n in pointcloud.nodes + if n.custom_attributes is not None + ], + output_gltf_dir, + ) + + pointcloud.nodes = [pointcloud.nodes[0]] + + return pointcloud diff --git a/src/pyopf/pointcloud/pcl.py b/src/pyopf/pointcloud/pcl.py new file mode 100644 index 0000000..3896e97 --- /dev/null +++ b/src/pyopf/pointcloud/pcl.py @@ -0,0 +1,660 @@ +import os +from dataclasses import fields +from pathlib import Path +from typing import Literal, Optional, Type + +import numpy as np +import pygltflib +from pygltflib import GLTF2 + +from pyopf.uid64 import Uid64 + +from ..types import CoreFormat +from ..versions import FormatVersion, VersionInfo +from .utils import ( + Buffer, + add_accessor, + add_buffers, + gl_to_numpy_shape, + gl_to_numpy_type, + write_buffers, +) + +opf_axis_rotation_matrix = np.array( + [[1, 0, 0, 0], [0, 0, 1, 0], [0, -1, 0, 0], [0, 0, 0, 1]] +) +opf_axis_rotation_matrix_inverse = np.array( + [[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]] +) + + +class PointIndexRanges: + """A wrapper over the packed pointIndexRanges array used by the OPF_mesh_primitive_matches extension. + + The index ranges are represented by a pair of a 5 byte unsigned int for the offset and a 3 byte unsigned int for + the count, which are packed and stored as 2 unsigned int values of 4 bytes. + + The raw data can be accessed by the `ranges` member, while the __getitem__ and __setitem__ functions can be used + for packing/unpacking the data. + """ + + def __init__(self, ranges: np.ndarray | np.memmap): + """Construct a PointIndexRanges wrapper over the raw data. + :param ranges: The point index range information, stored as an array of shape (num_points, 2). 
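+
+        Illustrative packing example (values chosen arbitrarily; assumes a little-endian
+        uint32 array and numpy imported as ``np``):
+
+            ranges = np.zeros((1, 2), dtype=np.uint32)
+            pir = PointIndexRanges(ranges)
+            pir[0] = (7, 2)          # stored as the raw uint32 pair (7, 512)
+            assert pir[0] == (7, 2)  # unpacked back to (offset, count)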
+ :raise ValueError: If ranges does not have two columns + """ + if ranges.shape[1] != 2: + raise ValueError("The ranges array should have two columns") + self.ranges = ranges + self.nbytes = self.ranges.nbytes + + def __getitem__(self, index: int) -> tuple[int, int]: + a = self.ranges[index, 0] + b = self.ranges[index, 1] + allbytes = a.tobytes() + b.tobytes() + offset = int.from_bytes(allbytes[:5], "little") + count = int.from_bytes(allbytes[5:], "little") + return (offset, count) + + def __setitem__(self, index: int, item: tuple[int, int]): + offset, count = item + allbytes = offset.to_bytes(5, "little") + count.to_bytes(3, "little") + a = int.from_bytes(allbytes[:4], "little") + b = int.from_bytes(allbytes[4:], "little") + self.ranges[index, 0] = a + self.ranges[index, 1] = b + + def __len__(self) -> int: + return self.ranges.shape[0] + + def total_count(self) -> int: + return sum(count for _, count in self) + + +class ImagePoints: + """Image Points, used by the OPF_mesh_primitive_matches extension""" + + featureIds: np.ndarray | np.memmap + scales: np.ndarray | np.memmap + pixelCoordinates: np.ndarray | np.memmap + depths: Optional[np.ndarray | np.memmap] + + def __init__(self): + self.depths = None + self.featureIds = np.array([]) + self.pixelCoordinates = np.array([]) + self.scales = np.array([]) + + @staticmethod + def from_gltf(image_points_extension: dict, accessors: list): + points = ImagePoints() + if "depths" in image_points_extension: + points.depths = accessors[image_points_extension["depths"]] + else: + points.depths = None + points.featureIds = accessors[image_points_extension["featureIds"]] + points.pixelCoordinates = accessors[image_points_extension["pixelCoordinates"]] + points.scales = accessors[image_points_extension["scales"]] + + return points + + @property + def buffer_filepaths(self) -> list[str | None]: + """Return a list of absolute file paths to the memory mapped arrays""" + + if ( + not hasattr(self.featureIds, "filename") + or not hasattr(self.pixelCoordinates, "filename") + or not hasattr(self.scales, "filename") + or not (self.depths is None or hasattr(self.depths, "filename")) + ): + raise ValueError("The image points have not been saved to disk") + + buffer_filenames = [ + self.featureIds.filename, # type: ignore + self.pixelCoordinates.filename, # type: ignore + self.scales.filename, # type: ignore + ] + if self.depths is not None: + buffer_filenames.append(self.depths.filename) # type: ignore + + return buffer_filenames # type: ignore + + def flush(self) -> None: + """Write to disk any changes to the binary buffers""" + + if ( + not hasattr(self.featureIds, "flush") + or not hasattr(self.pixelCoordinates, "flush") + or not hasattr(self.scales, "flush") + or not (self.depths is None or hasattr(self.depths, "flush")) + ): + raise ValueError("The image points have not been saved to disk") + + self.featureIds.flush() # type: ignore + self.pixelCoordinates.flush() # type: ignore + self.scales.flush() # type: ignore + + if self.depths is not None: + self.depths.flush() # type: ignore + + def write( + self, gltf: pygltflib.GLTF2, buffers: dict[Path, Buffer], output_gltf_dir: Path + ) -> dict: + """Adds the required accessors to the GLTF2 object (also creating the corresponding files) + :param gltf: The gltf object to add the accessors to + :param buffers: A dictionary mapping file paths with buffer objects + :param output_gltf_dir: The output directory for the binary buffers + :return: A dictionary with the accessor ids for the class members + """ + 
gltf_image_points = { + "pixelCoordinates": add_accessor( + gltf, + buffers, + self.pixelCoordinates, + output_gltf_dir / "matchPixelCoordinates.bin", + ), + "featureIds": add_accessor( + gltf, buffers, self.featureIds, output_gltf_dir / "matchFeatureIds.bin" + ), + "scales": add_accessor( + gltf, buffers, self.scales, output_gltf_dir / "matchScales.bin" + ), + } + if self.depths is not None: + gltf_image_points["depths"] = add_accessor( + gltf, buffers, self.depths, output_gltf_dir / "matchDepths.bin" + ) + + return gltf_image_points + + +class Matches: + """Used by the OPF_mesh_primitive_matches extension""" + + camera_uids: list[Uid64] + camera_ids: np.memmap | np.ndarray + point_index_ranges: PointIndexRanges + image_points: Optional[ImagePoints] + + def __init__(self): + self.camera_uids = [] + self.camera_ids = np.ndarray([]) + self.point_index_ranges = PointIndexRanges(np.array([]).reshape(-1, 2)) + self.image_points = None + + @staticmethod + def from_gltf(extension: dict, accessors: list, version: VersionInfo): + """ + Construct the matches object from glTF extension OPF_mesh_primitive_matches + + :param extension: The extension data + :param accessors: The list of all accesors available - this class will keep referencing the ones it needs + """ + matches = Matches() + if ( + version > VersionInfo(1, 0, "draft7") + and version <= FormatVersion.GLTF_OPF_ASSET + ): + matches.camera_uids = [Uid64(int=id) for id in extension["cameraUids"]] + elif version <= VersionInfo(1, 0, "draft7"): + matches.camera_uids = [Uid64(hex=id) for id in extension["cameraUids"]] + else: + raise ValueError(f"Unsupported OPF glTF version: {version}") + + matches.camera_ids = accessors[extension["cameraIds"]] + matches.point_index_ranges = PointIndexRanges( + accessors[extension["pointIndexRanges"]] + ) + + matches.image_points = ( + ImagePoints.from_gltf(extension["imagePoints"], accessors) + if "imagePoints" in extension + else None + ) + return matches + + @property + def buffer_filepaths(self) -> list[Path]: + """Return a list of absolute file paths to the memory mapped arrays""" + + if not hasattr(self.camera_ids, "filename") or not hasattr( + self.point_index_ranges.ranges, "filename" + ): + raise ValueError("The matches are not stored in a memory mapped array.") + + buffer_filenames = [ + self.camera_ids.filename, # type: ignore + self.point_index_ranges.ranges.filename, # type: ignore + ] + + if self.image_points is not None: + buffer_filenames.extend(self.image_points.buffer_filepaths) + + return buffer_filenames # type: ignore + + def flush(self) -> None: + """Write to disk any changes to the binary buffers""" + + if not hasattr(self.camera_ids, "flush") or not hasattr( + self.point_index_ranges.ranges, "flush" + ): + raise ValueError("The matches are not stored in a memory mapped array.") + + self.camera_ids.flush() # type: ignore + self.point_index_ranges.ranges.flush() # type: ignore + + if self.image_points is not None: + self.image_points.flush() + + def write( + self, + gltf: pygltflib.GLTF2, + buffers: dict[Path, Buffer], + output_gltf_dir: Path, + version: VersionInfo, + ) -> dict: + """Adds accessors for this object's data to a glTF object and saves the corresponding buffers to files. 
+ :param gltf: The gltf object to add the accessors to + :param buffers: A dictionary mapping file paths with buffer objects + :param output_gltf_dir: The output directory for the binary buffers + :return: A dictionary corresponding to the OPF gltf specification for the matches extension + """ + opf_mesh_primitive_matches = { + "cameraIds": add_accessor( + gltf, buffers, self.camera_ids, output_gltf_dir / "matchCameraIds.bin" + ), + "pointIndexRanges": add_accessor( + gltf, + buffers, + self.point_index_ranges.ranges, + output_gltf_dir / "matchPointIndexRanges.bin", + ), + } + if ( + version > VersionInfo(1, 0, "draft7") + and version <= FormatVersion.GLTF_OPF_ASSET + ): + opf_mesh_primitive_matches["cameraUids"] = [ + int(id.int) for id in self.camera_uids + ] + elif version <= VersionInfo(1, 0, "draft7"): + opf_mesh_primitive_matches["cameraUids"] = [ + str(id) for id in self.camera_uids + ] + else: + raise ValueError(f"Unsupported OPF glTF version: {version}") + + if self.image_points: + opf_mesh_primitive_matches["imagePoints"] = self.image_points.write( + gltf, buffers, output_gltf_dir + ) + + return opf_mesh_primitive_matches + + +class Node: + """A glTF node""" + + position: np.ndarray | np.memmap + normal: np.ndarray | np.memmap | None + color: np.ndarray | np.memmap | None + matches: Matches | None + matrix: np.ndarray | None + custom_attributes: dict[str, np.ndarray | np.memmap] | None + + def __init__(self): + self.position = np.zeros((0, 3)) + self.color = None + self.normal = None + self.matches = None + self.custom_attributes = None + self.matrix = None + + @staticmethod + def from_gltf( + node_id: int, + gltf: GLTF2, + accessors: list[np.ndarray] | list[np.memmap], + version: VersionInfo, + ) -> "Node": + """Construct an object representing the glTF node with id node_id. + The node id must be a valid node id. 
+ + :param node_id: The id of the node to construct + :param gltf: The glTF object + :param accessors: A list of arrays representing the data of the accessors, sharing the same indices + """ + + mesh = gltf.nodes[node_id].mesh + + if mesh is None: + raise ValueError("The node must have a mesh.") + + primitive = gltf.meshes[mesh].primitives[0] + + node = Node() + + if primitive.attributes.POSITION is None: + raise ValueError("The mesh must have a position attribute.") + + node.position = accessors[primitive.attributes.POSITION] + node.color = ( + accessors[primitive.attributes.COLOR_0] + if primitive.attributes.COLOR_0 + else None + ) + node.normal = ( + accessors[primitive.attributes.NORMAL] + if primitive.attributes.NORMAL + else None + ) + + node.matches = None + if ( + primitive.extensions is not None + and "OPF_mesh_primitive_matches" in primitive.extensions + ): + node.matches = Matches.from_gltf( + primitive.extensions["OPF_mesh_primitive_matches"], accessors, version + ) + + node.custom_attributes = None + if ( + primitive.extensions is not None + and "OPF_mesh_primitive_custom_attributes" in primitive.extensions + ): + node.custom_attributes = {} + for attribute_name, accessor_id in primitive.extensions[ + "OPF_mesh_primitive_custom_attributes" + ]["attributes"].items(): + node.custom_attributes[attribute_name] = accessors[accessor_id] + + node.matrix = np.array(gltf.nodes[node_id].matrix) + if node.matrix is not None: + node.matrix = np.array(node.matrix).reshape((4, 4), order="F") + + return node + + def __len__(self): + """The number of points in the node""" + return self.position.shape[0] + + @property + def buffer_filepaths(self) -> list[Path]: + """Return a list of absolute file paths to the memory mapped arrays""" + + if not hasattr(self.position, "filename"): + raise ValueError("The node is not stored in a memory mapped array.") + + buffer_filenames = [self.position.filename] # type: ignore + + if self.color is not None: + buffer_filenames.append(self.color.filename) # type: ignore + if self.normal is not None: + buffer_filenames.append(self.normal.filename) # type: ignore + if self.matches is not None: + buffer_filenames.extend(self.matches.buffer_filepaths) # type: ignore + if self.custom_attributes is not None: + for custom_attribute_buffer in self.custom_attributes.values(): + buffer_filenames.append(custom_attribute_buffer.filename) # type: ignore + + return buffer_filenames # type: ignore + + def flush(self) -> None: + """Write to disk any changes to the binary buffers""" + + if not hasattr(self.position, "flush"): + raise ValueError("The node is not stored in a memory mapped array.") + + self.position.flush() # type: ignore + + if self.color is not None: + self.color.flush() # type: ignore + if self.normal is not None: + self.normal.flush() # type: ignore + if self.matches is not None: + self.matches.flush() # type: ignore + if self.custom_attributes is not None: + for custom_attribute_buffer in self.custom_attributes.values(): + custom_attribute_buffer.flush() # type: ignore + + def write( + self, + gltf: pygltflib.GLTF2, + buffers: dict[Path, Buffer], + output_gltf_dir: Path, + version: VersionInfo, + ): + """Adds the node to the GLTF2 object in-place and writes the associated binary buffers + :param gltf: The GLTF2 object to add the node to + :param buffers: A dictionary mapping file paths with buffer objects + :param output_gltf_dir: Path where to write the binary buffers. + It is assumed that the *.gltf file will be written in the same place. 
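+
+        A minimal call sketch (``buffers`` and ``gltf`` are filled in place; the version is
+        normally the one of the owning GlTFPointCloud):
+
+            buffers: dict[Path, Buffer] = {}
+            gltf = pygltflib.GLTF2()
+            node.write(gltf, buffers, Path("out"), point_cloud.version)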
+ """ + gltf.nodes.append(pygltflib.Node(mesh=len(gltf.meshes))) + gltf.meshes.append( + pygltflib.Mesh( + primitives=[ + pygltflib.Primitive( + attributes=pygltflib.Attributes(), + indices=None, + material=0, + mode=pygltflib.POINTS, + extensions={}, + ) + ] + ) + ) + + primitive = gltf.meshes[-1].primitives[0] + + primitive.attributes.POSITION = add_accessor( + gltf, buffers, self.position, output_gltf_dir / "positions.bin" + ) + gltf.accessors[-1].min = self.position.min(axis=0).tolist() + gltf.accessors[-1].max = self.position.max(axis=0).tolist() + + if self.normal is not None: + primitive.attributes.NORMAL = add_accessor( + gltf, buffers, self.normal, output_gltf_dir / "normals.bin" + ) + + if self.color is not None: + primitive.attributes.COLOR_0 = add_accessor( + gltf, buffers, self.color, output_gltf_dir / "colors.bin" + ) + gltf.accessors[-1].normalized = True + + if self.matches is not None: + gltf.extensionsUsed.append("OPF_mesh_primitive_matches") + primitive.extensions["OPF_mesh_primitive_matches"] = self.matches.write(gltf, buffers, output_gltf_dir, version) # type: ignore + + if self.custom_attributes is not None: + gltf.extensionsUsed.append("OPF_mesh_primitive_custom_attributes") + attributes = { + name: add_accessor( + gltf, buffers, value, output_gltf_dir / (name + ".bin") + ) + for name, value in self.custom_attributes.items() + } + + assert primitive.extensions is not None + + primitive.extensions["OPF_mesh_primitive_custom_attributes"] = { + "attributes": attributes + } + + if self.matrix is not None: + gltf.nodes[-1].matrix = self.matrix.flatten(order="F").tolist() + + +class GlTFPointCloud: + """Open glTF point cloud""" + + _format: CoreFormat + _version: VersionInfo + nodes: list[Node] = [] + metadata: Optional["Metadata"] # noqa: F821 # type: ignore + + mode_type = Literal[ + "r", "c", "r+", "w+", "readonly", "copyonwrite", "readwrite", "write" + ] + + @property + def format(self): + return self._format + + @property + def version(self): + return self._version + + def _open_accessors( + self, gltf: GLTF2, base_dir: Path, mode: mode_type = "r" + ) -> list[np.memmap]: + """ + Utility to read the accessors of a glTF point cloud and return them as a list of numpy memmap arrays + :gltf GLTF2: A GLTF2 object + :base_dir Path: Base path where the glTF binary files are located + :mode str: Open mode for the memory mapped arrays + + :return list[numpy.memmap]: A list of numpy memmap arrays, indexed by their corresponding glTF indices + """ + + accessors = [] + + for accessor in gltf.accessors: + accessor_buffer_view = accessor.bufferView + + if accessor_buffer_view is None: + raise RuntimeError("Accessor is missing bufferView") + + buffer_view = gltf.bufferViews[accessor_buffer_view] + buffer_uri = gltf.buffers[buffer_view.buffer].uri + buffer_view_offset = buffer_view.byteOffset + + if buffer_uri is None: + raise RuntimeError("Buffer is missing uri") + if buffer_view_offset is None: + raise RuntimeError("BufferView is missing byteOffset") + + new_accessor = np.memmap( + base_dir / buffer_uri, + mode=mode, + dtype=gl_to_numpy_type(accessor.componentType), + offset=buffer_view_offset, + shape=(accessor.count, gl_to_numpy_shape(accessor.type)), + ) + accessors.append(new_accessor) + + return accessors + + def __init__(self): + self._format = CoreFormat.GLTF_MODEL + self._version = FormatVersion.GLTF_OPF_ASSET + + @staticmethod + def open(gltf_path: Path, mode: mode_type = "r"): + """Read a point cloud object. 
+ The accessors are resolved immediately and opened as numpy memmory mapped arrays, with the appropriate mode. + The gltf file is assumed to be valid and nonempty. + + :param gltf_path: The path to the *.gltf file + :param mode: Read mode for the accessors + """ + + gltf = GLTF2().load(gltf_path) + + if gltf is None: + raise RuntimeError("The glTF file %s could not be loaded" % gltf_path) + + pcl = GlTFPointCloud() + + if gltf.asset.extensions is None: + raise RuntimeError("GlTF asset has no extensions") + if ( + "OPF_asset_version" not in gltf.asset.extensions + or "version" not in gltf.asset.extensions["OPF_asset_version"] + ): + raise RuntimeError("OPF_asset_version extension missing or incorrect") + + pcl._version = VersionInfo.parse( + gltf.asset.extensions["OPF_asset_version"]["version"] + ) + + accessors = pcl._open_accessors(gltf, gltf_path.parent, mode) + + pcl.nodes = [ + Node.from_gltf(node_id, gltf, accessors, pcl._version) # type: ignore + for node_id in range(len(gltf.nodes)) + ] + + return pcl + + def __len__(self) -> int: + """The number of nodes in the glTF Point Cloud""" + return len(self.nodes) + + def is_readonly(self): + """Check if the binary buffers are read only""" + if len(self) == 0: + raise RuntimeError("Empty point cloud") + return not self.nodes[0].position.flags.writeable + + def flush(self) -> None: + """Write to disk any changes to the binary buffers""" + for node in self.nodes: + node.flush() + + def write(self, output_gltf_file: Path, save_buffers=True): + """Write the object as glTF point cloud. The binary buffers will be saved in the parent directory of the + glTF file, overriding any existing ones. + :param output_gltf_file: Path to the final*.gltf file. The binary buffers will be saved in the same directory. + :param save_buffers: If true, the binary buffers are written to files. Otherwise, their location is kept. + :return: A list of paths to the existing or new location of the binary buffers. + :raise FileNotFoundError: If the parent directory of `output_gltf_file` does not exist. 
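+
+        Round-trip sketch (paths are illustrative; the output directory must already exist):
+
+            pcl = GlTFPointCloud.open(Path("tracks/point_cloud.gltf"), mode="r")
+            buffer_paths = pcl.write(Path("out/point_cloud.gltf"), save_buffers=True)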
+ """ + + buffers = {} + output_gltf_dir = output_gltf_file.parent + + if not os.path.exists(output_gltf_dir): + raise FileNotFoundError( + "Output directory %s does not exist " % str(output_gltf_dir) + ) + + gltf = GLTF2( + scene=0, + scenes=[pygltflib.Scene(nodes=[0])], + materials=[ + pygltflib.Material( + emissiveFactor=None, + alphaMode=None, + alphaCutoff=None, + doubleSided=None, + extensions={"KHR_materials_unlit": {}}, + ) + ], + ) + + gltf.extensionsUsed = ["KHR_materials_unlit", "OPF_asset_version"] + + for node in self.nodes: + node.write(gltf, buffers, output_gltf_dir, self._version) + + if save_buffers: + write_buffers(buffers) + + add_buffers(gltf, buffers, output_gltf_dir) + + asset = pygltflib.Asset( + version="2.0", + extensions={"OPF_asset_version": {"version": str(self._version)}}, + ) + + gltf.save(str(output_gltf_file), asset) + + if save_buffers: + return [buffer.filepath for buffer in buffers.values()] + else: + return [path for node in self.nodes for path in node.buffer_filepaths] diff --git a/src/pyopf/pointcloud/utils.py b/src/pyopf/pointcloud/utils.py new file mode 100644 index 0000000..a9cc739 --- /dev/null +++ b/src/pyopf/pointcloud/utils.py @@ -0,0 +1,255 @@ +from pathlib import Path +from urllib import parse + +import numpy as np +import pygltflib + + +def gl_to_numpy_type(gl_code: int) -> type: + """ + Convert the OpenGL codes used by glTF to represent data types into numpy dtypes + + :raises ValueError: if the type is not supported + """ + match gl_code: + case 5121: + return np.uint8 + case 5125: + return np.uint32 + case 5126: + return np.float32 + case 5123: + return np.uint16 + case _: + raise ValueError( + "Unsupported or invalid glTF attribute type: code %d" % gl_code + ) + + +def gl_to_numpy_shape(gl_shape: str) -> int: + """ + Get the number of elements in a glTF object + :raises ValueError: if the object type is not supported + """ + match gl_shape: + case "SCALAR": + return 1 + case "VEC2": + return 2 + case "VEC3": + return 3 + case "VEC4": + return 4 + case _: + raise ValueError( + "Unsupported or invalid glTF attribute shape: code %d" % gl_shape + ) + + +def _numpy_to_gl_type(dtype: np.dtype) -> int: + """ + Convert numpy types into pygltflib codes. + :raises ValueError if the type is not supported + """ + match dtype.type: + case np.float32: + return pygltflib.FLOAT + case np.uint32: + return pygltflib.UNSIGNED_INT + case np.uint16: + return pygltflib.UNSIGNED_SHORT + case np.uint8: + return pygltflib.UNSIGNED_BYTE + case _: + raise ValueError("Unsupported type in glTF " + str(dtype)) + + +def _numpy_to_gl_shape(count: int) -> str: + """ + Converts the number of elements into an appropriate vector type for pygltflib. + :raises ValueError: if the count is not supported + """ + match count: + case 1: + return pygltflib.SCALAR + case 2: + return pygltflib.VEC2 + case 3: + return pygltflib.VEC3 + case 4: + return pygltflib.VEC4 + case _: + raise ValueError("Unsupported vector type with %s elements" % count) + + +def merge_arrays(arrays: list[np.ndarray | np.memmap], output_file: Path) -> np.ndarray: + """Merge multiple 2D numpy arrays in a single memory mapped array, along the first dimension. The second dimension must be the same. + + :param arrays: The list of numpy arrays to merge. + :param output_file: The path to the memory mapped file to write. If the file is present, it will be overwritten. + + :return: The newly created memory mapped array. 
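+
+    Example (shapes and dtypes are illustrative):
+
+        a = np.zeros((2, 3), dtype=np.float32)
+        b = np.ones((5, 3), dtype=np.float32)
+        merged = merge_arrays([a, b], Path("merged.bin"))  # memmap of shape (7, 3)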
+ + :raise ValueError: If any of the arrays is not bi-dimensional, if they do not have matching data types + or do not agree in the second dimension + """ + for a in arrays: + if len(a.shape) != 2: + raise ValueError("Can only merge bi-dimensional arrays") + if a.shape[1] != arrays[0].shape[1]: + raise ValueError("Arrays do not have the same number of columns") + if a.dtype != arrays[0].dtype: + raise ValueError("Arrays do not have the same data types") + + total_rows = sum(a.shape[0] for a in arrays) + + newAccessor = np.memmap( + output_file, + mode="w+", + dtype=arrays[0].dtype, + offset=0, + shape=(total_rows, arrays[0].shape[1]), + ) + + written_so_far = 0 + for a in arrays: + newAccessor[written_so_far : written_so_far + a.shape[0], :] = a + written_so_far += a.shape[0] + + return newAccessor + + +class Buffer: + """An abstraction of a glTF buffer whose data is shared by multiple arrays. + The arrays are merged into a file before writing. + """ + + arrays: list[np.memmap | np.ndarray] + + def __init__(self, buffer_id: int): + """Create a new buffer entry. + :param buffer_id: The glTF id of the buffer. + """ + self.buffer_id = buffer_id + self.arrays = [] + + def add_array(self, data: np.memmap | np.ndarray): + """Adds a new array of data to the current buffer""" + self.arrays.append(data) + + def write(self, filepath: Path): + """Concatenate the data and write to file. + :param filepath: The file path to write the data to. It is overwritten if present. + + :raise RuntimeError: If the buffer doesn't contain any data. + :raise ValueError: If the arrays do not match in their second dimension. + """ + + if len(self.arrays) == 0: + return RuntimeError("There is no data added to the buffer") + + self.arrays = [merge_arrays(self.arrays, filepath)] + + def __len__(self): + """Returns the total amount of data in the current buffer""" + return sum([buffer.nbytes for buffer in self.arrays]) + + @property + def number_of_arrays(self): + """Return the number of arrays used to store the the data of this buffer""" + return len(self.arrays) + + @property + def filepath(self): + """Returns the file path at which the buffer is saved to, in the case where there is only one buffer.` + :raise RuntimeError: If the object contains multiple arrays or none. + :raise ValueError: If the buffer is not a memory mapped array. + """ + if self.number_of_arrays != 1: + raise RuntimeError("There are none or multiple binary files in this buffer") + if not hasattr(self.arrays[0], "filename") or self.arrays[0].filename is None: # type: ignore + raise ValueError("The buffer is not a memory mapped array") + + return self.arrays[0].filename # type: ignore + + +def add_accessor( + gltf: pygltflib.GLTF2, + buffers: dict[Path, Buffer], + data: np.ndarray | np.memmap, + filepath: Path, +) -> int: + """ + Adds a new accessor to a GLTF2 object and a corresponding buffer view. + Assumes there is a one to one correspondence between accessors, buffer views. + The buffers parameter is also updated with the data to write. + The GLTF2 object is modified in place. + + :param gltf: The GLTF2 object where to add the new accessor + :param buffers: A dictionary of buffers, mapping the filepath to a buffer object containing the data associated + to that file path. If there is no data for a specific filepath, a new object is created. 
+ :param data: A numpy array which contains the data for the accessor + The format will be inferred from the shape and data type - it is assumed to be row vectors + :param filepath: The filepath for the binary data. It is assumed to be relative and not contain special characters. + + :return: The id of the new accessor + """ + + accessor_id = len(gltf.accessors) + buffer_view_id = accessor_id + + if filepath not in buffers: + buffers[filepath] = Buffer(len(buffers.keys())) + + buffer_id = buffers[filepath].buffer_id + + gltf.accessors.append( + pygltflib.Accessor( + bufferView=buffer_view_id, + type=_numpy_to_gl_shape(data.shape[1]), + count=data.shape[0], + componentType=_numpy_to_gl_type(data.dtype), + min=None, + max=None, + byteOffset=None, + ) + ) + gltf.bufferViews.append( + pygltflib.BufferView( + buffer=buffer_id, + byteOffset=len(buffers[filepath]), + byteLength=data.nbytes, + target=pygltflib.ARRAY_BUFFER, + ) + ) + + buffers[filepath].add_array(data) + + return accessor_id + + +def write_buffers(buffers: dict[Path, Buffer]): + """Write the buffers to the associated files. + The file names are taken as the dictionary keys, and must be either relative to the current directory or absolute. + + :raise RuntimeError: If the buffers could not be written. + This happens if they do not contain data or their arrays do not match in the second dimension. + """ + for filepath, buffer in buffers.items(): + buffer.write(filepath) + + +def add_buffers(gltf: pygltflib.GLTF2, buffers: dict[Path, Buffer], base_path: Path): + """Register the buffers in the glTF object. + :param gltf: The GLTF2 object where to add the buffers + :param buffers: A dictionary mapping a file path to a list of buffers. + :param base_path: The base path for the relative URI. + :param save_buffers: If False, the current file paths of the buffers are used. Otherwise, the buffers are assumed + to be available at the file paths indicated by the dictionary keys. 
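+
+    A typical sequence (sketch; ``positions`` and ``out_dir`` are illustrative names):
+
+        buffers: dict[Path, Buffer] = {}
+        add_accessor(gltf, buffers, positions, out_dir / "positions.bin")
+        write_buffers(buffers)               # concatenates the arrays and writes each file
+        add_buffers(gltf, buffers, out_dir)  # registers the buffers with URIs relative to out_dir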
+ """ + + for buffer in buffers.values(): + if buffer.filepath is None: + raise ValueError("The buffer is not a memory mapped file") + uri = parse.quote(str(Path(buffer.filepath).relative_to(base_path))) + gltf.buffers.append(pygltflib.Buffer(byteLength=len(buffer), uri=uri)) diff --git a/src/pyopf/project/__init__.py b/src/pyopf/project/__init__.py new file mode 100644 index 0000000..d39a1c4 --- /dev/null +++ b/src/pyopf/project/__init__.py @@ -0,0 +1,15 @@ +from .metadata import Metadata, ProjectMetadata +from .project import ( + Generator, + Project, + ProjectItem, + ProjectResource, + ProjectSource, +) +from .project_objects import Calibration, ProjectObjects +from .types import ( + CoreProjectItemType, + ExtensionProjectItemType, + ProjectItemType, + UnknownProjectItemType, +) diff --git a/src/pyopf/project/metadata.py b/src/pyopf/project/metadata.py new file mode 100644 index 0000000..ee61fcc --- /dev/null +++ b/src/pyopf/project/metadata.py @@ -0,0 +1,53 @@ +from dataclasses import dataclass, field +from typing import Optional, Union +from uuid import UUID, uuid4 + +from ..VersionInfo import VersionInfo +from ..versions import FormatVersion +from .project import Generator, ProjectItem, ProjectItemType, ProjectSource + + +class Sources: + """Placeholder class for declaring a project object sources as named properties""" + + pass + + +@dataclass(order=False, kw_only=True) +class Metadata: + type: ProjectItemType + id: UUID = field(default_factory=uuid4) + name: Optional[str] = None + labels: Optional[list[str]] = None + sources: Union[list[ProjectSource], Sources] = field(default_factory=list) + """The object sources. This may contain an object with named attributes + pointing to the sources or a list of ProjectSources""" + + @staticmethod + def from_item(item: ProjectItem) -> "Metadata": + return Metadata( + id=item.id, + name=item.name, + type=item.type, + labels=item.labels, + sources=item.sources, + ) + + def raw_sources(self) -> list[ProjectSource]: + """Undoes source resolution and returns a list of project sources.""" + if type(self.sources) is list: + return self.sources + + return [ + ProjectSource(id=obj.metadata.id, type=obj.metadata.type) + for obj in self.sources.__dict__.values() + ] + + +@dataclass(order=False, kw_only=True) +class ProjectMetadata: + id: UUID = field(default_factory=uuid4) + name: str = "" + description: str = "" + version: VersionInfo = field(default_factory=lambda: FormatVersion.PROJECT) + generator: Optional[Generator] = None diff --git a/src/pyopf/project/project.py b/src/pyopf/project/project.py new file mode 100644 index 0000000..debc4ed --- /dev/null +++ b/src/pyopf/project/project.py @@ -0,0 +1,276 @@ +from typing import Any, Dict, List, Optional +from uuid import UUID + +from ..formats import ( + CoreFormat, + Format, + NamedFormat, + format_from_str, + from_format, +) +from ..types import OpfObject, VersionInfo +from ..util import ( + from_list, + from_none, + from_str, + from_union, + from_version_info, + to_class, +) +from ..versions import FormatVersion, format_and_version_to_type +from .types import ( + NamedProjectItemType, + ProjectItemType, + from_project_item_type, + project_item_type_from_str, +) + + +def _item_type_to_str(x: ProjectItemType) -> str: + return x.name if isinstance(x, NamedProjectItemType) else x.value + + +class Generator: + """The generator of this project""" + + """The name of the generator""" + name: str + """The version of the generator`""" + version: str + + def __init__(self, name: str, version: str) -> None: + 
self.name = name + self.version = version + + @staticmethod + def from_dict(obj: Any) -> "Generator": + assert isinstance(obj, dict) + name = from_str(obj.get("name")) + version = from_str(obj.get("version")) + return Generator(name, version) + + def to_dict(self) -> dict: + result: dict = {} + result["name"] = from_str(self.name) + result["version"] = from_str(self.version) + return result + + +class ProjectResource(OpfObject): + """The storage format of this resource.""" + + format: Format + """URI reference of the resource file as specified by + [RFC2396](https://www.w3.org/2001/03/identification-problem/rfc2396-uri-references.html). + If the reference is relative, it is relative to the folder containing the present file + """ + uri: str + + def __init__( + self, + format: Format, + uri: str, + ) -> None: + super(ProjectResource, self).__init__() + self.format = format + self.uri = uri + + @staticmethod + def from_dict(obj: Any) -> "ProjectResource": + assert isinstance(obj, dict) + format = from_union([from_format, format_from_str], obj.get("format")) + uri = from_str(obj.get("uri")) + result = ProjectResource(format, uri) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(ProjectResource, self).to_dict() + if isinstance(self.format, NamedFormat): + result["format"] = self.format.name + else: + result["format"] = self.format.value + result["uri"] = from_str(self.uri) + return result + + +class ProjectSource(OpfObject): + id: UUID + type: ProjectItemType + + def __init__( + self, + id: UUID, + type: ProjectItemType, + ) -> None: + super(ProjectSource, self).__init__() + self.id = id + self.type = type + + @staticmethod + def from_dict(obj: Any) -> "ProjectSource": + assert isinstance(obj, dict) + id = UUID(obj.get("id")) + type = from_union( + [from_project_item_type, project_item_type_from_str], obj.get("type") + ) + result = ProjectSource(id, type) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(ProjectSource, self).to_dict() + result["id"] = str(self.id) + result["type"] = _item_type_to_str(self.type) + return result + + +class ProjectItem(OpfObject): + id: UUID + """The name of this item""" + name: Optional[str] + """The resources that constitute this item""" + resources: List[ProjectResource] + """The sources of this items, that is the set of items this item depends on""" + sources: List[ProjectSource] + """Define the type of data represented by the item.""" + type: ProjectItemType + """Labels associated to the item""" + labels: Optional[List[str]] + + def __init__( + self, + id: UUID, + type: ProjectItemType, + resources: List[ProjectResource], + sources: List[ProjectSource], + name: Optional[str] = None, + labels: Optional[List[str]] = None, + ) -> None: + super(ProjectItem, self).__init__() + self.id = id + self.name = name + self.resources = resources + self.sources = sources + self.type = type + self.labels = labels + + @staticmethod + def from_dict(obj: Any) -> "ProjectItem": + assert isinstance(obj, dict) + id = UUID(obj.get("id")) + name = from_union([from_str, from_none], obj.get("name")) + resources = from_list(ProjectResource.from_dict, obj.get("resources")) + sources = from_list(ProjectSource.from_dict, obj.get("sources")) + type = from_union( + [from_project_item_type, project_item_type_from_str], obj.get("type") + ) + labels = from_union( + [lambda x: from_list(from_str, x), from_none], obj.get("labels") + ) + result = 
ProjectItem(id, type, resources, sources, name, labels) + result._extract_unknown_properties_and_extensions(obj) + return result + + def to_dict(self) -> dict: + result = super(ProjectItem, self).to_dict() + result["id"] = str(self.id) + if self.name is not None: + result["name"] = from_union([from_str, from_none], self.name) + result["resources"] = from_list( + lambda x: to_class(ProjectResource, x), self.resources + ) + result["sources"] = from_list( + lambda x: to_class(ProjectSource, x), self.sources + ) + if isinstance(self.type, NamedProjectItemType): + result["type"] = self.type.name + else: + result["type"] = self.type.value + if self.labels is not None: + result["labels"] = from_union( + [lambda x: from_list(from_str, x), from_none], self.labels + ) + return result + + +class Project(OpfObject): + """Project Structure""" + + """The description of the project""" + description: str + """The generator of this project""" + generator: Optional[Generator] + id: UUID + """The items contained in this project""" + items: List[ProjectItem] + """The name of the project""" + name: str + """The version of this specification as `MAJOR.MINOR`. Breaking changes are reflected by a + change in MAJOR version. Can optionally include a pre-release tag `MAJOR.MINOR-tag`. + Examples: `0.1`, `1.0`, `1.0-draft1` + """ + version: VersionInfo + + base_uri: Optional[str] = None + """Base URI to be used to resolve relative URI reference of project resources.""" + + format = CoreFormat.PROJECT + + def __init__( + self, + id: UUID, + name: str, + description: str, + items: List[ProjectItem], + version: VersionInfo = FormatVersion.PROJECT, + generator: Optional[Generator] = None, + ) -> None: + super(Project, self).__init__() + self.description = description + self.generator = generator + self.id = id + self.items = items + self.name = name + self.version = version + + @staticmethod + def from_dict(obj: Any) -> "Project": + assert isinstance(obj, dict) + description = from_str(obj.get("description")) + assert from_str(obj.get("format")) == CoreFormat.PROJECT + + generator = from_union([Generator.from_dict, from_none], obj.get("generator")) + id = UUID(obj.get("id")) + items = from_list(ProjectItem.from_dict, obj.get("items")) + name = from_str(obj.get("name")) + version = from_union([from_version_info, VersionInfo.parse], obj.get("version")) + result = Project( + id, + name, + description, + items, + version, + generator, + ) + result._extract_unknown_properties_and_extensions(obj, ["format"]) + return result + + def to_dict(self) -> dict: + result = super(Project, self).to_dict() + result["description"] = from_str(self.description) + result["format"] = CoreFormat.PROJECT + + if self.generator is not None: + result["generator"] = from_union( + [lambda x: to_class(Generator, x), from_none], + self.generator, + ) + result["id"] = str(self.id) + result["items"] = from_list(lambda x: to_class(ProjectItem, x), self.items) + result["name"] = from_str(self.name) + result["version"] = str(self.version) + return result + + +format_and_version_to_type[(CoreFormat.PROJECT, FormatVersion.PROJECT)] = Project diff --git a/src/pyopf/project/project_objects.py b/src/pyopf/project/project_objects.py new file mode 100644 index 0000000..61e3894 --- /dev/null +++ b/src/pyopf/project/project_objects.py @@ -0,0 +1,148 @@ +from dataclasses import dataclass, field + +from pyopf.project.types import ExtensionProjectItemType + +from ..cameras import ( + CalibratedCameras, + CameraList, + GpsBias, + InputCameras, + ProjectedInputCameras, 
+) +from ..cps import ( + CalibratedControlPoints, + Constraints, + InputControlPoints, + ProjectedControlPoints, +) +from ..crs import SceneReferenceFrame +from ..formats import ExtensionFormat +from ..items import BaseItem, ExtensionItem +from ..pointcloud.pcl import GlTFPointCloud +from .metadata import ProjectMetadata + + +@dataclass(eq=False, order=False, kw_only=True) +class Calibration: + calibrated_cameras_objs: list[CalibratedCameras] = field(default_factory=list) + calibrated_control_points_objs: list[CalibratedControlPoints] = field( + default_factory=list + ) + point_cloud_objs: list[GlTFPointCloud] = field(default_factory=list) + gps_bias: GpsBias = None + + _metadata: ProjectMetadata = field(default_factory=ProjectMetadata) + + @property + def calibrated_cameras(self): + if len(self.calibrated_cameras_objs) != 0: + return self.calibrated_cameras_objs[0] + return None + + @property + def calibrated_control_points(self): + if len(self.calibrated_control_points_objs) != 0: + return self.calibrated_control_points_objs[0] + return None + + @property + def tracks(self): + if len(self.point_cloud_objs) != 0: + return self.point_cloud_objs[0] + return None + + @property + def metadata(self): + return self._metadata + + @metadata.setter + def metadata(self, metadata): + self._metadata = metadata + + +@dataclass(eq=False, order=False, kw_only=True) +class ProjectObjects: + + scene_reference_frame_objs: list[SceneReferenceFrame] = field(default_factory=list) + camera_list_objs: list[CameraList] = field(default_factory=list) + input_cameras_objs: list[InputCameras] = field(default_factory=list) + projected_input_cameras_objs: list[ProjectedInputCameras] = field( + default_factory=list + ) + constraints_objs: list[Constraints] = field(default_factory=list) + input_control_points_objs: list[InputControlPoints] = field(default_factory=list) + projected_control_points_objs: list[ProjectedControlPoints] = field( + default_factory=list + ) + calibration_objs: list[Calibration] = field(default_factory=list) + point_cloud_objs: list[GlTFPointCloud] = field(default_factory=list) + extensions: list[ExtensionItem] = field(default_factory=list) + + _metadata: ProjectMetadata = field(default_factory=ProjectMetadata) + + @property + def metadata(self): + return self._metadata + + @property + def scene_reference_frame(self): + if len(self.scene_reference_frame_objs) != 0: + return self.scene_reference_frame_objs[0] + return None + + @property + def camera_list(self): + if len(self.camera_list_objs) != 0: + return self.camera_list_objs[0] + return None + + @property + def input_cameras(self): + if len(self.input_cameras_objs) != 0: + return self.input_cameras_objs[0] + return None + + @property + def projected_input_cameras(self): + if len(self.projected_input_cameras_objs) != 0: + return self.projected_input_cameras_objs[0] + return None + + @property + def constraints(self): + if len(self.constraints_objs) != 0: + return self.constraints_objs[0] + return None + + @property + def input_control_points(self): + if len(self.input_control_points_objs) != 0: + return self.input_control_points_objs[0] + return None + + @property + def projected_control_points(self): + if len(self.projected_control_points_objs) != 0: + return self.projected_control_points_objs[0] + return None + + @property + def calibration(self): + if len(self.calibration_objs) != 0: + return self.calibration_objs[0] + return None + + @property + def point_cloud(self): + if len(self.point_cloud_objs) != 0: + return self.point_cloud_objs[0] 
+ return None + + def get_extensions_by_format( + self, searched_format: ExtensionFormat + ) -> list[ExtensionProjectItemType]: + found_extensions = [] + for extension in self.extensions: + if extension.format == searched_format: + found_extensions.append(extension) + return found_extensions diff --git a/src/pyopf/project/types.py b/src/pyopf/project/types.py new file mode 100644 index 0000000..5035b33 --- /dev/null +++ b/src/pyopf/project/types.py @@ -0,0 +1,73 @@ +from enum import Enum +from types import DynamicClassAttribute +from typing import Any, Union + +from ..util import from_union + + +class CoreProjectItemType(Enum): + """Project item type for items defined in the core OPF spec""" + + @DynamicClassAttribute + def name(self) -> str: + return self.value + + CALIBRATION = "calibration" + CAMERA_LIST = "camera_list" + CONSTRAINTS = "constraints" + INPUT_CAMERAS = "input_cameras" + INPUT_CONTROL_POINTS = "input_control_points" + POINT_CLOUD = "point_cloud" + PROJECTED_CONTROL_POINTS = "projected_control_points" + PROJECTED_INPUT_CAMERAS = "projected_input_cameras" + SCENE_REFERENCE_FRAME = "scene_reference_frame" + + +class NamedProjectItemType: + name: str + + def __str__(self): + return self.name + + def __repr__(self): + return '%s("%s")' % (self.__class__.__name__, self.name) + + +class ExtensionProjectItemType(NamedProjectItemType): + """Project item type for items defined in extensions.""" + + """The item type name. Must begin with "ext_".""" + + def __init__(self, name: str): + if not name.startswith("ext_"): + raise ValueError( + "Invalid name for extension project item type, it must start with ext_" + ) + self.name = name + + def __eq__(self, other: "ExtensionProjectItemType"): + return self.name == other.name + + +class UnknownProjectItemType(NamedProjectItemType): + def __init__(self, name: str): + self.name = name + + def __eq__(self, other: "UnknownProjectItemType"): + return self.name == other.name + + +ProjectItemType = ( + CoreProjectItemType | ExtensionProjectItemType | UnknownProjectItemType +) + + +def project_item_type_from_str(x: Any) -> ProjectItemType: + return from_union( + [CoreProjectItemType, ExtensionProjectItemType, UnknownProjectItemType], x + ) + + +def from_project_item_type(x: ProjectItemType) -> ProjectItemType: + assert isinstance(x, ProjectItemType) + return x diff --git a/src/pyopf/resolve/__init__.py b/src/pyopf/resolve/__init__.py new file mode 100644 index 0000000..9607265 --- /dev/null +++ b/src/pyopf/resolve/__init__.py @@ -0,0 +1 @@ +from .resolver import resolve diff --git a/src/pyopf/resolve/resolver.py b/src/pyopf/resolve/resolver.py new file mode 100644 index 0000000..13aa0fe --- /dev/null +++ b/src/pyopf/resolve/resolver.py @@ -0,0 +1,139 @@ +import copy +from pathlib import Path +from typing import Any, List +from urllib.parse import unquote, urljoin, urlparse +from uuid import UUID + +from .. 
import io +from ..pointcloud.pcl import GlTFPointCloud +from ..project import Calibration, Project, ProjectObjects, ProjectSource +from ..project.metadata import Metadata, Sources +from ..project.types import ( + CoreProjectItemType, + NamedProjectItemType, + ProjectItemType, +) +from ..types import CoreFormat, Format, format_to_str + + +def _item_type_to_str(x: ProjectItemType) -> str: + return x.name if isinstance(x, NamedProjectItemType) else x.value + + +def _format_to_name(x: Format) -> str: + if x.startswith("application"): + prefix_len = len("application/opf-") + return (x.value[prefix_len:]).split("+")[0].replace("-", "_") + elif x == CoreFormat.GLTF_MODEL: + return "point_cloud" + else: + raise RuntimeError("Unsupported format " + x) + + +def _resolve_sources(sources: list[ProjectSource], objects_by_id: dict[UUID, Any]): + result = Sources() + for source in sources: + try: + obj = objects_by_id[source.id] + if source.type != obj.metadata.type: + raise RuntimeError( + "Inconsistent project item dependency. " + 'The source %s was declared as "%s", ' + 'but the item is "%s"' + % (source.id, source.type.name, obj.metadata.type.name) + ) + name = _item_type_to_str(source.type) + if hasattr(result, name): + # Only one source of a given type is supported; the sources will + # remain unresolved + return None + setattr(result, name, obj) + + except KeyError: + # Not all sources could be resolved + return None + return result + + +def resolve(project: Project, supported_extensions=[]): + """Take an OPF project and return an object that contains its items + loaded in named attributes for easier manipulation.""" + + result = ProjectObjects() + + objects_by_id = {} + + for item in project.items: + is_core_item = isinstance(item.type, CoreProjectItemType) + is_supported_extension = item.type.name in supported_extensions + + if item.type == CoreProjectItemType.CALIBRATION: + + calibration = Calibration() + + calibration.metadata = Metadata.from_item(item) + + for resource in item.resources: + + obj = io.load(resource, project.base_uri, item.resources) + if obj is None: + continue + + name = _format_to_name(resource.format) + if name == "gps_bias": + # Only one GPS bias resource is acceptable + if calibration.gps_bias is not None: + raise RuntimeError( + "A calibration cannot contain multiple GPS bias resources" + ) + calibration.gps_bias = obj + else: + calibration.__dict__.setdefault(name + "_objs", []).append(obj) + + result.calibration_objs.append(calibration) + + elif len(item.resources) == 1 and (is_core_item or is_supported_extension): + obj = io.load(item.resources[0].uri, project.base_uri) + obj.metadata = Metadata.from_item(item) + + if obj.format != item.resources[0].format: + raise RuntimeError( + "Inconsistent resource format detected. The resource %s" + ' was declared as "%s", but the target URI contains "%s"' + % (item.resources[0].uri, item.resources[0].format, obj.format) + ) + + name = _item_type_to_str(obj.metadata.type) + objects_by_id[obj.metadata.id] = obj + + if is_core_item: + result.__dict__[name + "_objs"].append(obj) + else: + result.__dict__["extensions"].append(obj) + + elif item.type == CoreProjectItemType.POINT_CLOUD: + gltf_uri = next( + resource.uri + for resource in item.resources + if resource.format == CoreFormat.GLTF_MODEL + ) + point_cloud = io.load(gltf_uri, project.base_uri) + point_cloud.metadata = Metadata.from_item(item) + + result.point_cloud_objs.append(point_cloud) + else: + pass + + # Resolving source references + for obj in objects_by_id.values(): + + sources = _resolve_sources(obj.metadata.sources, objects_by_id) + if sources: + obj.metadata.sources = sources + + result.metadata.id = project.id + result.metadata.version = project.version + result.metadata.name = project.name + result.metadata.description = project.description + + return result
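As a rough usage sketch of the resolver above: the project file name, the use of the standard json module to read it, and the way the base URI is derived are illustrative assumptions, not part of this changeset (pyopf.io may provide its own loader); Project.from_dict, base_uri, resolve and the ProjectObjects accessors are the APIs added here.

import json
from pathlib import Path

from pyopf.project import Project
from pyopf.resolve import resolve

path = Path("project.opf")  # hypothetical project file
project = Project.from_dict(json.loads(path.read_text()))
project.base_uri = str(path.parent)  # used to resolve relative resource URIs

objects = resolve(project)
print(objects.metadata.name, objects.metadata.version)
if objects.calibration is not None:
    print(objects.calibration.calibrated_cameras)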
diff --git a/src/pyopf/types.py b/src/pyopf/types.py new file mode 100644 index 0000000..7131bd3 --- /dev/null +++ b/src/pyopf/types.py @@ -0,0 +1,128 @@ +import copy +from abc import abstractmethod +from typing import Any, Dict, Optional, Type, TypeVar + +from typing_extensions import Self + +from .formats import ( + CoreFormat, + ExtensionFormat, + Extensions, + Format, + format_from_str, + format_to_str, + from_format, +) +from .uid64 import Uid64 +from .util import from_extensions, from_uid, from_union, from_version_info +from .VersionInfo import VersionInfo + + +def _extract_unknown_properties(opf_object: Any, obj: dict, ignore_keys=set()) -> Any: + result = { + key: copy.deepcopy(val) + for key, val in obj.items() + if key not in opf_object.__dict__ and key not in ignore_keys + } + return None if len(result) == 0 else result + + +class OpfPropertyExtObject(object): + """Base class for OPF extension property objects. + This class is similar to OpfObject, but it doesn't contain any logic to handle + extensions, since extensions of extensions are not allowed.""" + + unknown_properties: Optional[dict] + extension_name: str = "" + version: Optional[VersionInfo] = None + + def __init__(self, unknown_properties: Optional[dict] = None): + self.unknown_properties = unknown_properties + + def to_dict(self) -> dict: + result = ( + {} + if self.unknown_properties is None + else copy.deepcopy(self.unknown_properties) + ) + return result + + @staticmethod + @abstractmethod + def from_dict(obj: Any) -> None: + return None + + def _extract_unknown_properties_and_extensions( + self, obj: dict, ignore_keys=set() + ) -> "OpfPropertyExtObject": + """This function is meant to be called from `from_dict` static methods to + identify all unknown properties and store them in self.unknown_properties. + + See OpfObject._extract_unknown_properties_and_extensions for details. + """ + self.unknown_properties = _extract_unknown_properties(self, obj, ignore_keys) + return self + + +class OpfObject: + """Base class for any OPF object.
+ This class contains the logic for making OPF objects extensible and for + preserving unknown properties during parsing and serialization.""" + + extensions: Extensions + unknown_properties: Optional[dict] + + def __init__( + self, + extensions: Optional[Extensions] = None, + unknown_properties: Optional[dict] = None, + ): + self.extensions = extensions + self.unknown_properties = unknown_properties + + def to_dict(self, *known_extensions) -> dict: + result = ( + {} + if self.unknown_properties is None + else copy.deepcopy(self.unknown_properties) + ) + # Start from the unknown extensions, if any, so that they are preserved + # on serialization, then add the known extensions passed by subclasses. + extensions = {} + if self.extensions is not None: + extensions = from_extensions(self.extensions) or {} + for extension in known_extensions: + if extension is not None: + extensions[extension.extension_name] = extension.to_dict() + if len(extensions) != 0: + result["extensions"] = extensions + return result + + def _extract_unknown_properties_and_extensions(self, obj: dict, ignore_keys=set()): + """This function is meant to be called from `from_dict` static methods to + retrieve the extensions and store them in self.extensions and to identify + all unknown properties and store them in self.unknown_properties. + + The implementation copies the input dict first and then removes all the entries + whose key matches an attribute of the object. This relies on self.__dict__, which + means it may not work if the class uses slots. It also requires that class + attributes use the same names as the JSON attributes. + + The keys in ignore_keys are not considered unknown properties. This is + used for example when a property is parsed to discern the type of an object + but the property itself is not stored in the final object. + """ + self.extensions = from_extensions(obj.get("extensions")) + assert ignore_keys is not None + self.unknown_properties = _extract_unknown_properties(self, obj, ignore_keys) + + T = TypeVar("T", bound="OpfPropertyExtObject") + + def _extract_known_extension(self, cls: Type[T]) -> Optional[T]: + if self.extensions is None: + return None + try: + extension = self.extensions[cls.extension_name] + except KeyError: + return None + result = cls.from_dict(extension) + del self.extensions[cls.extension_name] + return result
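To illustrate how OpfPropertyExtObject is meant to be subclassed, here is a minimal, hypothetical extension property object following the pattern above; the extension name, the value field and the version are made up for the example and are not part of this changeset.

from pyopf.types import OpfPropertyExtObject
from pyopf.VersionInfo import VersionInfo


class ExampleExtension(OpfPropertyExtObject):
    extension_name = "EXT_example"  # hypothetical extension name
    version = VersionInfo(1, 0)

    def __init__(self, value: float, unknown_properties=None):
        super().__init__(unknown_properties)
        self.value = value

    def to_dict(self) -> dict:
        # Keep any unknown properties captured during parsing.
        result = super().to_dict()
        result["value"] = self.value
        return result

    @staticmethod
    def from_dict(obj) -> "ExampleExtension":
        result = ExampleExtension(obj["value"])
        result._extract_unknown_properties_and_extensions(obj)
        return result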
diff --git a/src/pyopf/uid64.py b/src/pyopf/uid64.py new file mode 100644 index 0000000..712263f --- /dev/null +++ b/src/pyopf/uid64.py @@ -0,0 +1,69 @@ +import os + +int_ = int # The built-in int type +bytes_ = bytes # The built-in bytes type + + +class Uid64: + __slots__ = "int" + + def __init__( + self, + int: int_ | None = None, + hex: str | None = None, + bytes: bytes_ | None = None, + ): + + if [hex, bytes, int].count(None) != 2: + raise TypeError("Exactly one of int, hex or bytes must be given") + + if hex is not None: + int = int_(hex, 16) + + if bytes is not None: + if len(bytes) != 8: + raise ValueError("bytes is not an 8-byte string") + assert isinstance(bytes, bytes_), repr(bytes) + int = int_.from_bytes(bytes, byteorder="big") + + if int is not None: + if not 0 <= int < 1 << 64: + raise ValueError("int is out of range (need a 64-bit value)") + object.__setattr__(self, "int", int) + + @property + def bytes(self): + return self.int.to_bytes(8, "big") + + @property + def hex(self): + return self.__str__() + + def __int__(self): + return self.int + + def __eq__(self, other): + if isinstance(other, Uid64): + return self.int == other.int + if isinstance(other, int_): + return self.int == other + return NotImplemented + + def __repr__(self): + return "%s(%r)" % (self.__class__.__name__, str(self)) + + def __setattr__(self, name, value): + raise TypeError("Uid64 objects are immutable") + + def __str__(self): + return "0x%016X" % self.int + + def __hash__(self): + return hash(self.int) + + def __deepcopy__(self, _memo): + return self + + +def uid64(): + return Uid64(bytes=os.urandom(8)) diff --git a/src/pyopf/util.py b/src/pyopf/util.py new file mode 100644 index 0000000..5f22c4d --- /dev/null +++ b/src/pyopf/util.py @@ -0,0 +1,119 @@ +from enum import Enum +from types import UnionType +from typing import Any, Callable, Dict, List, Optional, Type, TypeVar, cast + +import numpy as np + +from pyopf.uid64 import Uid64 + +from .VersionInfo import VersionInfo + +T = TypeVar("T") +EnumT = TypeVar("EnumT", bound=Enum) +IntType = int | np.int64 | np.int32 + + +def from_str(x: Any) -> str: + assert isinstance(x, str) + return x + + +def from_bool(x: Any) -> bool: + assert isinstance(x, bool) + return x + + +def from_list(f: Callable[[Any], T], x: Any) -> List[T]: + assert isinstance(x, list) or isinstance(x, np.ndarray) + return [f(y) for y in x] + + +def from_dict(f: Callable[[Any], T], x: Any) -> Dict[str, T]: + assert isinstance(x, dict) + return {k: f(v) for (k, v) in x.items()} + + +def from_version_info(x: Any) -> VersionInfo: + assert isinstance(x, VersionInfo) + return x + + +def from_none(x: Any) -> Any: + assert x is None + return x + + +def from_uid(x: Any) -> Uid64: + if isinstance(x, str): + return Uid64(hex=x) + if isinstance(x, int): + return Uid64(int=x) + if isinstance(x, bytes): + return Uid64(bytes=x) + raise ValueError("Unsupported type for Uid64") + + +def from_union(fs, x): + for f in fs: + try: + return f(x) + except Exception: + pass + assert False + + +def vector_from_list( + x: Any, min_size: int = -1, max_size: int = -1, dtype: type | str = "f8" +) -> np.ndarray: + if max_size != -1 and len(x) > max_size: + raise ValueError("Invalid array length") + if min_size != -1 and len(x) < min_size: + raise ValueError("Invalid array length") + + if (type(dtype) is str and "f" in dtype) or dtype is float: + return np.array(from_list(from_float, x), dtype=dtype) + elif (type(dtype) is str and "i" in dtype) or dtype is int: + return np.array(from_list(from_int, x), dtype=dtype) + else: + raise ValueError("Unsupported dtype") + + +def from_int(x: Any) -> IntType: + assert isinstance(x, (int, np.int64, np.int32)) and not isinstance(x, bool) # type: ignore + return x + + +def from_float(x: Any) -> float: + assert isinstance(x, (float, int, np.float32, np.float64)) and not isinstance( # type: ignore + x, bool + ) + return float(x) + + +def to_float(x: Any) -> float: + assert isinstance(x, float) + return x + + +def to_int(x: Any) -> int: + assert isinstance(x, (int, np.int64, np.int32)) # type: ignore + return int(x) + + +def to_class( + c: type | UnionType, + x: "OpfObject | OpfPropertyExtObject", # noqa: F821 # type: ignore +) -> dict: + assert isinstance(x, c) + return x.to_dict() + + +def to_enum(c: Type[EnumT], x: Any) -> EnumT: + assert isinstance(x, c) + return x.value + + +def from_extensions(x: Any) -> Optional[Dict[str, Dict[str, Any]]]: + return from_union( + [lambda x: from_dict(lambda x: from_dict(lambda x: x, x), x), from_none], x + ) diff --git a/src/pyopf/versions.py b/src/pyopf/versions.py new file mode 100644 index 0000000..e9ad59a --- /dev/null +++ b/src/pyopf/versions.py @@ -0,0 +1,20 @@ +from .VersionInfo import VersionInfo + + +# To be generated from the schemas +class FormatVersion: + CALIBRATED_CAMERAS = VersionInfo(1, 0) + CALIBRATED_CONTROL_POINTS = VersionInfo(1, 0) + CAMERA_LIST = VersionInfo(1,
0) + CONSTRAINTS = VersionInfo(1, 0) + GLTF_OPF_ASSET = VersionInfo(1, 0) + GPS_BIAS = VersionInfo(1, 0) + INPUT_CAMERAS = VersionInfo(1, 0) + INPUT_CONTROL_POINTS = VersionInfo(1, 0) + PROJECT = VersionInfo(1, 0) + PROJECTED_CONTROL_POINTS = VersionInfo(1, 0) + PROJECTED_INPUT_CAMERAS = VersionInfo(1, 0) + SCENE_REFERENCE_FRAME = VersionInfo(1, 0) + + +format_and_version_to_type = {}
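Finally, a small illustrative sketch of the Uid64 helper introduced above; the literal value is arbitrary and only meant to show the accepted constructor forms.

from pyopf.uid64 import Uid64, uid64

a = uid64()  # random 64-bit identifier
b = Uid64(hex="0x0123456789ABCDEF")
assert int(b) == 0x0123456789ABCDEF
assert Uid64(int=int(a)) == a  # round-trips through int
print(a, b.hex)  # both format as 0x%016X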