diff --git a/0.6.1/404.html b/0.6.1/404.html new file mode 100644 index 000000000..c25f3fb88 --- /dev/null +++ b/0.6.1/404.html @@ -0,0 +1,1252 @@ + + + +
+ + + + + + + + + + + + + + + + +Closed issues:
+Merged pull requests:
+Closed issues:
+Merged pull requests:
+Merged pull requests:
+Closed issues:
+Merged pull requests:
+Fixed bugs:
+Closed issues:
+Merged pull requests:
+is_defined
into a ThingClass property and improved its documentation. #597 (jesper-friis)Fixed bugs:
+Merged pull requests:
+Fixed bugs:
+LegacyVersion
does not exist in packaging.version
#540is_instance_of
property to be iterable #506images/material.png
#495Closed issues:
+Merged pull requests:
+Fixed bugs:
+bandit
failing #478Closed issues:
+Merged pull requests:
+Merged pull requests:
+Fixed bugs:
+Closed issues:
+Merged pull requests:
+Implemented enhancements:
+pre-commit
#243Ontology
#228Fixed bugs:
+rdflib
import #306get_triples()
method #280Closed issues:
+Merged pull requests:
+ID!
type instead of String!
#375 (CasperWA)pre-commit
& various tools #245 (CasperWA)Implemented enhancements:
+collections
#236Fixed bugs:
+Closed issues:
+factpluspluswrapper
folders #213mike
for versioned documentation #197Merged pull requests:
+packaging
to list of requirements #256 (CasperWA)collections.abc
when possible #240 (CasperWA)__init__.py
files for FaCT++ wrapper (again) #221 (CasperWA)Closed issues:
+Merged pull requests:
+Fixed bugs:
+Closed issues:
+Merged pull requests:
+Closed issues:
+Merged pull requests:
+Merged pull requests:
+Implemented enhancements:
+Closed issues:
+Merged pull requests:
+Closed issues:
+Merged pull requests:
+Merged pull requests:
+Closed issues:
+Merged pull requests:
+Closed issues:
+ +Merged pull requests:
+Merged pull requests:
+Closed issues:
+Merged pull requests:
+Merged pull requests:
+Merged pull requests:
+Merged pull requests:
+Merged pull requests:
+Merged pull requests:
+Merged pull requests:
+Merged pull requests:
+Merged pull requests:
+Closed issues:
+Merged pull requests:
+Merged pull requests:
+Closed issues:
+Merged pull requests:
+Merged pull requests:
+Merged pull requests:
+Closed issues:
+Merged pull requests:
+Merged pull requests:
+Implemented enhancements:
+Merged pull requests:
+Closed issues:
+ +Closed issues:
+Merged pull requests:
+Closed issues:
+Merged pull requests:
+* This Changelog was automatically generated by github_changelog_generator
+ + + + + + + + + + + + + +Copyright 2019-2022 SINTEF
+Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met:
+Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer.
+Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution.
+Neither the name of the copyright holder nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ + + + + + + + + + + + + +A module for testing an ontology against conventions defined for EMMO.
+A YAML file can be provided with additional test configurations.
+Example configuration file:
+test_unit_dimensions:
+ exceptions:
+ - myunits.MyUnitCategory1
+ - myunits.MyUnitCategory2
+
+skip:
+ - name_of_test_to_skip
+
+enable:
+ - name_of_test_to_enable
+
+TestEMMOConventions
+
+
+
+¶Base class for testing an ontology against EMMO conventions.
+ +emmopy/emmocheck.py
class TestEMMOConventions(unittest.TestCase):
+ """Base class for testing an ontology against EMMO conventions."""
+
+ config = {} # configurations
+
+ def get_config(self, string, default=None):
+ """Returns the configuration specified by `string`.
+
+ If configuration is not found in the configuration file, `default` is
+ returned.
+
+ Sub-configurations can be accessed by separating the components with
+ dots, like "test_namespace.exceptions".
+ """
+ result = self.config
+ try:
+ for token in string.split("."):
+ result = result[token]
+ except KeyError:
+ return default
+ return result
+
get_config(self, string, default=None)
+
+
+¶Returns the configuration specified by string
.
If configuration is not found in the configuration file, default
is
+returned.
Sub-configurations can be accessed by separating the components with +dots, like "test_namespace.exceptions".
+ +emmopy/emmocheck.py
def get_config(self, string, default=None):
+ """Returns the configuration specified by `string`.
+
+ If configuration is not found in the configuration file, `default` is
+ returned.
+
+ Sub-configurations can be accessed by separating the components with
+ dots, like "test_namespace.exceptions".
+ """
+ result = self.config
+ try:
+ for token in string.split("."):
+ result = result[token]
+ except KeyError:
+ return default
+ return result
+
+TestFunctionalEMMOConventions
+
+
+
+¶Test functional EMMO conventions.
+ +emmopy/emmocheck.py
class TestFunctionalEMMOConventions(TestEMMOConventions):
+ """Test functional EMMO conventions."""
+
+ def test_unit_dimension(self):
+ """Check that all measurement units have a physical dimension.
+
+ Configurations:
+ exceptions - full class names of classes to ignore.
+ """
+ exceptions = set(
+ (
+ "metrology.MultipleUnit",
+ "metrology.SubMultipleUnit",
+ "metrology.OffSystemUnit",
+ "metrology.PrefixedUnit",
+ "metrology.NonPrefixedUnit",
+ "metrology.SpecialUnit",
+ "metrology.DerivedUnit",
+ "metrology.BaseUnit",
+ "metrology.UnitSymbol",
+ "siunits.SICoherentDerivedUnit",
+ "siunits.SINonCoherentDerivedUnit",
+ "siunits.SISpecialUnit",
+ "siunits.SICoherentUnit",
+ "siunits.SIPrefixedUnit",
+ "siunits.SIBaseUnit",
+ "siunits.SIUnitSymbol",
+ "siunits.SIUnit",
+ "emmo.MultipleUnit",
+ "emmo.SubMultipleUnit",
+ "emmo.OffSystemUnit",
+ "emmo.PrefixedUnit",
+ "emmo.NonPrefixedUnit",
+ "emmo.SpecialUnit",
+ "emmo.DerivedUnit",
+ "emmo.BaseUnit",
+ "emmo.UnitSymbol",
+ "emmo.SIAccepted",
+ "emmo.SICoherentDerivedUnit",
+ "emmo.SINonCoherentDerivedUnit",
+ "emmo.SISpecialUnit",
+ "emmo.SICoherentUnit",
+ "emmo.SIPrefixedUnit",
+ "emmo.SIBaseUnit",
+ "emmo.SIUnitSymbol",
+ "emmo.SIUnit",
+ )
+ )
+ if not hasattr(self.onto, "MeasurementUnit"):
+ return
+ exceptions.update(self.get_config("test_unit_dimension.exceptions", ()))
+ regex = re.compile(r"^(emmo|metrology).hasDimensionString.value\(.*\)$")
+ classes = set(self.onto.classes(self.check_imported))
+ for cls in self.onto.MeasurementUnit.descendants():
+ if not self.check_imported and cls not in classes:
+ continue
+ # Assume that actual units are not subclassed
+ if not list(cls.subclasses()) and repr(cls) not in exceptions:
+ with self.subTest(cls=cls, label=get_label(cls)):
+ self.assertTrue(
+ any(
+ regex.match(repr(r))
+ for r in cls.get_indirect_is_a()
+ ),
+ msg=cls,
+ )
+
+ def test_quantity_dimension_beta3(self):
+ """Check that all quantities have a physicalDimension annotation.
+
+ Note: this test will be deprecated when isq is moved to emmo/domain.
+
+ Configurations:
+ exceptions - full class names of classes to ignore.
+ """
+ exceptions = set(
+ (
+ "properties.ModelledQuantitativeProperty",
+ "properties.MeasuredQuantitativeProperty",
+ "properties.ConventionalQuantitativeProperty",
+ "metrology.QuantitativeProperty",
+ "metrology.Quantity",
+ "metrology.OrdinalQuantity",
+ "metrology.BaseQuantity",
+ "metrology.PhysicalConstant",
+ "metrology.PhysicalQuantity",
+ "metrology.ExactConstant",
+ "metrology.MeasuredConstant",
+ "metrology.DerivedQuantity",
+ "isq.ISQBaseQuantity",
+ "isq.InternationalSystemOfQuantity",
+ "isq.ISQDerivedQuantity",
+ "isq.SIExactConstant",
+ "emmo.ModelledQuantitativeProperty",
+ "emmo.MeasuredQuantitativeProperty",
+ "emmo.ConventionalQuantitativeProperty",
+ "emmo.QuantitativeProperty",
+ "emmo.Quantity",
+ "emmo.OrdinalQuantity",
+ "emmo.BaseQuantity",
+ "emmo.PhysicalConstant",
+ "emmo.PhysicalQuantity",
+ "emmo.ExactConstant",
+ "emmo.MeasuredConstant",
+ "emmo.DerivedQuantity",
+ "emmo.ISQBaseQuantity",
+ "emmo.InternationalSystemOfQuantity",
+ "emmo.ISQDerivedQuantity",
+ "emmo.SIExactConstant",
+ "emmo.NonSIUnits",
+ "emmo.StandardizedPhysicalQuantity",
+ "emmo.CategorizedPhysicalQuantity",
+ "emmo.AtomicAndNuclear",
+ "emmo.Defined",
+ "emmo.Electromagnetic",
+ "emmo.FrequentlyUsed",
+ "emmo.PhysicoChemical",
+ "emmo.ChemicalCompositionQuantity",
+ "emmo.Universal",
+ )
+ )
+ if not hasattr(self.onto, "PhysicalQuantity"):
+ return
+ exceptions.update(
+ self.get_config("test_quantity_dimension.exceptions", ())
+ )
+ regex = re.compile(
+ "^T([+-][1-9]|0) L([+-][1-9]|0) M([+-][1-9]|0) I([+-][1-9]|0) "
+ "(H|Θ)([+-][1-9]|0) N([+-][1-9]|0) J([+-][1-9]|0)$"
+ )
+ classes = set(self.onto.classes(self.check_imported))
+ for cls in self.onto.PhysicalQuantity.descendants():
+ if not self.check_imported and cls not in classes:
+ continue
+ if repr(cls) not in exceptions:
+ with self.subTest(cls=cls, label=get_label(cls)):
+ anno = cls.get_annotations()
+ self.assertIn("physicalDimension", anno, msg=cls)
+ physdim = anno["physicalDimension"].first()
+ self.assertRegex(physdim, regex, msg=cls)
+
+ def test_quantity_dimension(self):
+ """Check that all quantities have a physicalDimension.
+
+ Note: this test will be deprecated when isq is moved to emmo/domain.
+
+ Configurations:
+ exceptions - full class names of classes to ignore.
+ """
+ # pylint: disable=invalid-name
+ exceptions = set(
+ (
+ "properties.ModelledQuantitativeProperty",
+ "properties.MeasuredQuantitativeProperty",
+ "properties.ConventionalQuantitativeProperty",
+ "metrology.QuantitativeProperty",
+ "metrology.Quantity",
+ "metrology.OrdinalQuantity",
+ "metrology.BaseQuantity",
+ "metrology.PhysicalConstant",
+ "metrology.PhysicalQuantity",
+ "metrology.ExactConstant",
+ "metrology.MeasuredConstant",
+ "metrology.DerivedQuantity",
+ "isq.ISQBaseQuantity",
+ "isq.InternationalSystemOfQuantity",
+ "isq.ISQDerivedQuantity",
+ "isq.SIExactConstant",
+ "emmo.ModelledQuantitativeProperty",
+ "emmo.MeasuredQuantitativeProperty",
+ "emmo.ConventionalQuantitativeProperty",
+ "emmo.QuantitativeProperty",
+ "emmo.Quantity",
+ "emmo.OrdinalQuantity",
+ "emmo.BaseQuantity",
+ "emmo.PhysicalConstant",
+ "emmo.PhysicalQuantity",
+ "emmo.ExactConstant",
+ "emmo.MeasuredConstant",
+ "emmo.DerivedQuantity",
+ "emmo.ISQBaseQuantity",
+ "emmo.InternationalSystemOfQuantity",
+ "emmo.ISQDerivedQuantity",
+ "emmo.SIExactConstant",
+ "emmo.NonSIUnits",
+ "emmo.StandardizedPhysicalQuantity",
+ "emmo.CategorizedPhysicalQuantity",
+ "emmo.ISO80000Categorised",
+ "emmo.AtomicAndNuclear",
+ "emmo.Defined",
+ "emmo.Electromagnetic",
+ "emmo.FrequentlyUsed",
+ "emmo.ChemicalCompositionQuantity",
+ "emmo.EquilibriumConstant", # physical dimension may change
+ "emmo.Solubility",
+ "emmo.Universal",
+ "emmo.Intensive",
+ "emmo.Extensive",
+ "emmo.Concentration",
+ )
+ )
+ if not hasattr(self.onto, "PhysicalQuantity"):
+ return
+ exceptions.update(
+ self.get_config("test_quantity_dimension.exceptions", ())
+ )
+ classes = set(self.onto.classes(self.check_imported))
+ for cls in self.onto.PhysicalQuantity.descendants():
+ if not self.check_imported and cls not in classes:
+ continue
+ if issubclass(cls, self.onto.ISO80000Categorised):
+ continue
+ if repr(cls) not in exceptions:
+ with self.subTest(cls=cls, label=get_label(cls)):
+ for r in cls.get_indirect_is_a():
+ if isinstance(r, owlready2.Restriction) and repr(
+ r
+ ).startswith("emmo.hasMeasurementUnit.some"):
+ self.assertTrue(
+ issubclass(
+ r.value,
+ (
+ self.onto.DimensionalUnit,
+ self.onto.DimensionlessUnit,
+ ),
+ )
+ )
+ break
+ else:
+ self.assertTrue(
+ issubclass(cls, self.onto.ISQDimensionlessQuantity)
+ )
+
+ def test_dimensional_unit(self):
+ """Check correct syntax of dimension string of dimensional units."""
+
+ # This test requires that the ontology has imported SIDimensionalUnit
+ if "SIDimensionalUnit" not in self.onto:
+ self.skipTest("SIDimensionalUnit is not imported")
+
+ # pylint: disable=invalid-name
+ regex = re.compile(
+ "^T([+-][1-9][0-9]*|0) L([+-][1-9]|0) M([+-][1-9]|0) "
+ "I([+-][1-9]|0) (H|Θ)([+-][1-9]|0) N([+-][1-9]|0) "
+ "J([+-][1-9]|0)$"
+ )
+ for cls in self.onto.SIDimensionalUnit.__subclasses__():
+ with self.subTest(cls=cls, label=get_label(cls)):
+ self.assertEqual(len(cls.equivalent_to), 1)
+ r = cls.equivalent_to[0]
+ self.assertIsInstance(r, owlready2.Restriction)
+ self.assertRegex(r.value, regex)
+
+ def test_physical_quantity_dimension(self):
+ """Check that all physical quantities have `hasPhysicalDimension`.
+
+ Note: this test will fail before isq is moved to emmo/domain.
+
+ Configurations:
+ exceptions - full class names of classes to ignore.
+
+ """
+ exceptions = set(
+ (
+ "emmo.ModelledQuantitativeProperty",
+ "emmo.MeasuredQuantitativeProperty",
+ "emmo.ConventionalQuantitativeProperty",
+ "emmo.QuantitativeProperty",
+ "emmo.BaseQuantity",
+ "emmo.PhysicalConstant",
+ "emmo.PhysicalQuantity",
+ "emmo.ExactConstant",
+ "emmo.MeasuredConstant",
+ "emmo.DerivedQuantity",
+ "emmo.ISQBaseQuantity",
+ "emmo.InternationalSystemOfQuantity",
+ "emmo.ISQDerivedQuantity",
+ "emmo.SIExactConstant",
+ "emmo.NonSIUnits",
+ "emmo.StandardizedPhysicalQuantity",
+ "emmo.CategorizedPhysicalQuantity",
+ "emmo.AtomicAndNuclearPhysicsQuantity",
+ "emmo.ThermodynamicalQuantity",
+ "emmo.LightAndRadiationQuantity",
+ "emmo.SpaceAndTimeQuantity",
+ "emmo.AcousticQuantity",
+ "emmo.PhysioChememicalQuantity",
+ "emmo.ElectromagneticQuantity",
+ "emmo.MechanicalQuantity",
+ "emmo.CondensedMatterPhysicsQuantity",
+ "emmo.ChemicalCompositionQuantity",
+ "emmo.Extensive",
+ "emmo.Intensive",
+ )
+ )
+ if not hasattr(self.onto, "PhysicalQuantity"):
+ return
+ exceptions.update(
+ self.get_config("test_physical_quantity_dimension.exceptions", ())
+ )
+ classes = set(self.onto.classes(self.check_imported))
+ for cls in self.onto.PhysicalQuantity.descendants():
+ if not self.check_imported and cls not in classes:
+ continue
+ if repr(cls) not in exceptions:
+ with self.subTest(cls=cls, label=get_label(cls)):
+ try:
+ class_props = cls.INDIRECT_get_class_properties()
+ except AttributeError:
+ # The INDIRECT_get_class_properties() method
+ # does not support inverse properties. Build
+ # class_props manually...
+ class_props = set()
+ for _ in cls.mro():
+ if hasattr(_, "is_a"):
+ class_props.update(
+ [
+ restriction.property
+ for restriction in _.is_a
+ if isinstance(
+ restriction, owlready2.Restriction
+ )
+ ]
+ )
+
+ self.assertIn(
+ self.onto.hasPhysicalDimension, class_props, msg=cls
+ )
+
+ def test_namespace(self):
+ """Check that all IRIs are namespaced after their (sub)ontology.
+
+ Configurations:
+ exceptions - full name of entities to ignore.
+ """
+ exceptions = set(
+ (
+ "owl.qualifiedCardinality",
+ "owl.minQualifiedCardinality",
+ "terms.creator",
+ "terms.contributor",
+ "terms.publisher",
+ "terms.title",
+ "terms.license",
+ "terms.abstract",
+ "core.prefLabel",
+ "core.altLabel",
+ "core.hiddenLabel",
+ "mereotopology.Item",
+ "manufacturing.EngineeredMaterial",
+ )
+ )
+ exceptions.update(self.get_config("test_namespace.exceptions", ()))
+
+ def checker(onto, ignore_namespace):
+ if list(
+ filter(onto.base_iri.strip("#").endswith, self.ignore_namespace)
+ ):
+ print(f"Skipping namespace: {onto.base_iri}")
+ return
+ entities = itertools.chain(
+ onto.classes(),
+ onto.object_properties(),
+ onto.data_properties(),
+ onto.individuals(),
+ onto.annotation_properties(),
+ )
+ for entity in entities:
+ if entity not in visited and repr(entity) not in exceptions:
+ visited.add(entity)
+ with self.subTest(
+ iri=entity.iri,
+ base_iri=onto.base_iri,
+ entity=repr(entity),
+ ):
+ self.assertTrue(
+ entity.iri.endswith(entity.name),
+ msg=(
+ "the final part of entity IRIs must be their "
+ "name"
+ ),
+ )
+ self.assertEqual(
+ entity.iri,
+ onto.base_iri + entity.name,
+ msg=(
+ f"IRI {entity.iri!r} does not correspond to "
+ f"module namespace: {onto.base_iri!r}"
+ ),
+ )
+
+ if self.check_imported:
+ for imp_onto in onto.imported_ontologies:
+ if imp_onto not in visited_onto:
+ visited_onto.add(imp_onto)
+ checker(imp_onto, ignore_namespace)
+
+ visited = set()
+ visited_onto = set()
+ checker(self.onto, self.ignore_namespace)
+
test_dimensional_unit(self)
+
+
+¶Check correct syntax of dimension string of dimensional units.
+ +emmopy/emmocheck.py
def test_dimensional_unit(self):
+ """Check correct syntax of dimension string of dimensional units."""
+
+ # This test requires that the ontology has imported SIDimensionalUnit
+ if "SIDimensionalUnit" not in self.onto:
+ self.skipTest("SIDimensionalUnit is not imported")
+
+ # pylint: disable=invalid-name
+ regex = re.compile(
+ "^T([+-][1-9][0-9]*|0) L([+-][1-9]|0) M([+-][1-9]|0) "
+ "I([+-][1-9]|0) (H|Θ)([+-][1-9]|0) N([+-][1-9]|0) "
+ "J([+-][1-9]|0)$"
+ )
+ for cls in self.onto.SIDimensionalUnit.__subclasses__():
+ with self.subTest(cls=cls, label=get_label(cls)):
+ self.assertEqual(len(cls.equivalent_to), 1)
+ r = cls.equivalent_to[0]
+ self.assertIsInstance(r, owlready2.Restriction)
+ self.assertRegex(r.value, regex)
+
test_namespace(self)
+
+
+¶Check that all IRIs are namespaced after their (sub)ontology.
+Configurations
+exceptions - full name of entities to ignore.
+emmopy/emmocheck.py
def test_namespace(self):
+ """Check that all IRIs are namespaced after their (sub)ontology.
+
+ Configurations:
+ exceptions - full name of entities to ignore.
+ """
+ exceptions = set(
+ (
+ "owl.qualifiedCardinality",
+ "owl.minQualifiedCardinality",
+ "terms.creator",
+ "terms.contributor",
+ "terms.publisher",
+ "terms.title",
+ "terms.license",
+ "terms.abstract",
+ "core.prefLabel",
+ "core.altLabel",
+ "core.hiddenLabel",
+ "mereotopology.Item",
+ "manufacturing.EngineeredMaterial",
+ )
+ )
+ exceptions.update(self.get_config("test_namespace.exceptions", ()))
+
+ def checker(onto, ignore_namespace):
+ if list(
+ filter(onto.base_iri.strip("#").endswith, self.ignore_namespace)
+ ):
+ print(f"Skipping namespace: {onto.base_iri}")
+ return
+ entities = itertools.chain(
+ onto.classes(),
+ onto.object_properties(),
+ onto.data_properties(),
+ onto.individuals(),
+ onto.annotation_properties(),
+ )
+ for entity in entities:
+ if entity not in visited and repr(entity) not in exceptions:
+ visited.add(entity)
+ with self.subTest(
+ iri=entity.iri,
+ base_iri=onto.base_iri,
+ entity=repr(entity),
+ ):
+ self.assertTrue(
+ entity.iri.endswith(entity.name),
+ msg=(
+ "the final part of entity IRIs must be their "
+ "name"
+ ),
+ )
+ self.assertEqual(
+ entity.iri,
+ onto.base_iri + entity.name,
+ msg=(
+ f"IRI {entity.iri!r} does not correspond to "
+ f"module namespace: {onto.base_iri!r}"
+ ),
+ )
+
+ if self.check_imported:
+ for imp_onto in onto.imported_ontologies:
+ if imp_onto not in visited_onto:
+ visited_onto.add(imp_onto)
+ checker(imp_onto, ignore_namespace)
+
+ visited = set()
+ visited_onto = set()
+ checker(self.onto, self.ignore_namespace)
+
test_physical_quantity_dimension(self)
+
+
+¶Check that all physical quantities have hasPhysicalDimension
.
Note: this test will fail before isq is moved to emmo/domain.
+Configurations
+exceptions - full class names of classes to ignore.
+emmopy/emmocheck.py
def test_physical_quantity_dimension(self):
+ """Check that all physical quantities have `hasPhysicalDimension`.
+
+ Note: this test will fail before isq is moved to emmo/domain.
+
+ Configurations:
+ exceptions - full class names of classes to ignore.
+
+ """
+ exceptions = set(
+ (
+ "emmo.ModelledQuantitativeProperty",
+ "emmo.MeasuredQuantitativeProperty",
+ "emmo.ConventionalQuantitativeProperty",
+ "emmo.QuantitativeProperty",
+ "emmo.BaseQuantity",
+ "emmo.PhysicalConstant",
+ "emmo.PhysicalQuantity",
+ "emmo.ExactConstant",
+ "emmo.MeasuredConstant",
+ "emmo.DerivedQuantity",
+ "emmo.ISQBaseQuantity",
+ "emmo.InternationalSystemOfQuantity",
+ "emmo.ISQDerivedQuantity",
+ "emmo.SIExactConstant",
+ "emmo.NonSIUnits",
+ "emmo.StandardizedPhysicalQuantity",
+ "emmo.CategorizedPhysicalQuantity",
+ "emmo.AtomicAndNuclearPhysicsQuantity",
+ "emmo.ThermodynamicalQuantity",
+ "emmo.LightAndRadiationQuantity",
+ "emmo.SpaceAndTimeQuantity",
+ "emmo.AcousticQuantity",
+ "emmo.PhysioChememicalQuantity",
+ "emmo.ElectromagneticQuantity",
+ "emmo.MechanicalQuantity",
+ "emmo.CondensedMatterPhysicsQuantity",
+ "emmo.ChemicalCompositionQuantity",
+ "emmo.Extensive",
+ "emmo.Intensive",
+ )
+ )
+ if not hasattr(self.onto, "PhysicalQuantity"):
+ return
+ exceptions.update(
+ self.get_config("test_physical_quantity_dimension.exceptions", ())
+ )
+ classes = set(self.onto.classes(self.check_imported))
+ for cls in self.onto.PhysicalQuantity.descendants():
+ if not self.check_imported and cls not in classes:
+ continue
+ if repr(cls) not in exceptions:
+ with self.subTest(cls=cls, label=get_label(cls)):
+ try:
+ class_props = cls.INDIRECT_get_class_properties()
+ except AttributeError:
+ # The INDIRECT_get_class_properties() method
+ # does not support inverse properties. Build
+ # class_props manually...
+ class_props = set()
+ for _ in cls.mro():
+ if hasattr(_, "is_a"):
+ class_props.update(
+ [
+ restriction.property
+ for restriction in _.is_a
+ if isinstance(
+ restriction, owlready2.Restriction
+ )
+ ]
+ )
+
+ self.assertIn(
+ self.onto.hasPhysicalDimension, class_props, msg=cls
+ )
+
test_quantity_dimension(self)
+
+
+¶Check that all quantities have a physicalDimension.
+Note: this test will be deprecated when isq is moved to emmo/domain.
+Configurations
+exceptions - full class names of classes to ignore.
+emmopy/emmocheck.py
def test_quantity_dimension(self):
+ """Check that all quantities have a physicalDimension.
+
+ Note: this test will be deprecated when isq is moved to emmo/domain.
+
+ Configurations:
+ exceptions - full class names of classes to ignore.
+ """
+ # pylint: disable=invalid-name
+ exceptions = set(
+ (
+ "properties.ModelledQuantitativeProperty",
+ "properties.MeasuredQuantitativeProperty",
+ "properties.ConventionalQuantitativeProperty",
+ "metrology.QuantitativeProperty",
+ "metrology.Quantity",
+ "metrology.OrdinalQuantity",
+ "metrology.BaseQuantity",
+ "metrology.PhysicalConstant",
+ "metrology.PhysicalQuantity",
+ "metrology.ExactConstant",
+ "metrology.MeasuredConstant",
+ "metrology.DerivedQuantity",
+ "isq.ISQBaseQuantity",
+ "isq.InternationalSystemOfQuantity",
+ "isq.ISQDerivedQuantity",
+ "isq.SIExactConstant",
+ "emmo.ModelledQuantitativeProperty",
+ "emmo.MeasuredQuantitativeProperty",
+ "emmo.ConventionalQuantitativeProperty",
+ "emmo.QuantitativeProperty",
+ "emmo.Quantity",
+ "emmo.OrdinalQuantity",
+ "emmo.BaseQuantity",
+ "emmo.PhysicalConstant",
+ "emmo.PhysicalQuantity",
+ "emmo.ExactConstant",
+ "emmo.MeasuredConstant",
+ "emmo.DerivedQuantity",
+ "emmo.ISQBaseQuantity",
+ "emmo.InternationalSystemOfQuantity",
+ "emmo.ISQDerivedQuantity",
+ "emmo.SIExactConstant",
+ "emmo.NonSIUnits",
+ "emmo.StandardizedPhysicalQuantity",
+ "emmo.CategorizedPhysicalQuantity",
+ "emmo.ISO80000Categorised",
+ "emmo.AtomicAndNuclear",
+ "emmo.Defined",
+ "emmo.Electromagnetic",
+ "emmo.FrequentlyUsed",
+ "emmo.ChemicalCompositionQuantity",
+ "emmo.EquilibriumConstant", # physical dimension may change
+ "emmo.Solubility",
+ "emmo.Universal",
+ "emmo.Intensive",
+ "emmo.Extensive",
+ "emmo.Concentration",
+ )
+ )
+ if not hasattr(self.onto, "PhysicalQuantity"):
+ return
+ exceptions.update(
+ self.get_config("test_quantity_dimension.exceptions", ())
+ )
+ classes = set(self.onto.classes(self.check_imported))
+ for cls in self.onto.PhysicalQuantity.descendants():
+ if not self.check_imported and cls not in classes:
+ continue
+ if issubclass(cls, self.onto.ISO80000Categorised):
+ continue
+ if repr(cls) not in exceptions:
+ with self.subTest(cls=cls, label=get_label(cls)):
+ for r in cls.get_indirect_is_a():
+ if isinstance(r, owlready2.Restriction) and repr(
+ r
+ ).startswith("emmo.hasMeasurementUnit.some"):
+ self.assertTrue(
+ issubclass(
+ r.value,
+ (
+ self.onto.DimensionalUnit,
+ self.onto.DimensionlessUnit,
+ ),
+ )
+ )
+ break
+ else:
+ self.assertTrue(
+ issubclass(cls, self.onto.ISQDimensionlessQuantity)
+ )
+
test_quantity_dimension_beta3(self)
+
+
+¶Check that all quantities have a physicalDimension annotation.
+Note: this test will be deprecated when isq is moved to emmo/domain.
+Configurations
+exceptions - full class names of classes to ignore.
+emmopy/emmocheck.py
def test_quantity_dimension_beta3(self):
+ """Check that all quantities have a physicalDimension annotation.
+
+ Note: this test will be deprecated when isq is moved to emmo/domain.
+
+ Configurations:
+ exceptions - full class names of classes to ignore.
+ """
+ exceptions = set(
+ (
+ "properties.ModelledQuantitativeProperty",
+ "properties.MeasuredQuantitativeProperty",
+ "properties.ConventionalQuantitativeProperty",
+ "metrology.QuantitativeProperty",
+ "metrology.Quantity",
+ "metrology.OrdinalQuantity",
+ "metrology.BaseQuantity",
+ "metrology.PhysicalConstant",
+ "metrology.PhysicalQuantity",
+ "metrology.ExactConstant",
+ "metrology.MeasuredConstant",
+ "metrology.DerivedQuantity",
+ "isq.ISQBaseQuantity",
+ "isq.InternationalSystemOfQuantity",
+ "isq.ISQDerivedQuantity",
+ "isq.SIExactConstant",
+ "emmo.ModelledQuantitativeProperty",
+ "emmo.MeasuredQuantitativeProperty",
+ "emmo.ConventionalQuantitativeProperty",
+ "emmo.QuantitativeProperty",
+ "emmo.Quantity",
+ "emmo.OrdinalQuantity",
+ "emmo.BaseQuantity",
+ "emmo.PhysicalConstant",
+ "emmo.PhysicalQuantity",
+ "emmo.ExactConstant",
+ "emmo.MeasuredConstant",
+ "emmo.DerivedQuantity",
+ "emmo.ISQBaseQuantity",
+ "emmo.InternationalSystemOfQuantity",
+ "emmo.ISQDerivedQuantity",
+ "emmo.SIExactConstant",
+ "emmo.NonSIUnits",
+ "emmo.StandardizedPhysicalQuantity",
+ "emmo.CategorizedPhysicalQuantity",
+ "emmo.AtomicAndNuclear",
+ "emmo.Defined",
+ "emmo.Electromagnetic",
+ "emmo.FrequentlyUsed",
+ "emmo.PhysicoChemical",
+ "emmo.ChemicalCompositionQuantity",
+ "emmo.Universal",
+ )
+ )
+ if not hasattr(self.onto, "PhysicalQuantity"):
+ return
+ exceptions.update(
+ self.get_config("test_quantity_dimension.exceptions", ())
+ )
+ regex = re.compile(
+ "^T([+-][1-9]|0) L([+-][1-9]|0) M([+-][1-9]|0) I([+-][1-9]|0) "
+ "(H|Θ)([+-][1-9]|0) N([+-][1-9]|0) J([+-][1-9]|0)$"
+ )
+ classes = set(self.onto.classes(self.check_imported))
+ for cls in self.onto.PhysicalQuantity.descendants():
+ if not self.check_imported and cls not in classes:
+ continue
+ if repr(cls) not in exceptions:
+ with self.subTest(cls=cls, label=get_label(cls)):
+ anno = cls.get_annotations()
+ self.assertIn("physicalDimension", anno, msg=cls)
+ physdim = anno["physicalDimension"].first()
+ self.assertRegex(physdim, regex, msg=cls)
+
test_unit_dimension(self)
+
+
+¶Check that all measurement units have a physical dimension.
+Configurations
+exceptions - full class names of classes to ignore.
+emmopy/emmocheck.py
def test_unit_dimension(self):
+ """Check that all measurement units have a physical dimension.
+
+ Configurations:
+ exceptions - full class names of classes to ignore.
+ """
+ exceptions = set(
+ (
+ "metrology.MultipleUnit",
+ "metrology.SubMultipleUnit",
+ "metrology.OffSystemUnit",
+ "metrology.PrefixedUnit",
+ "metrology.NonPrefixedUnit",
+ "metrology.SpecialUnit",
+ "metrology.DerivedUnit",
+ "metrology.BaseUnit",
+ "metrology.UnitSymbol",
+ "siunits.SICoherentDerivedUnit",
+ "siunits.SINonCoherentDerivedUnit",
+ "siunits.SISpecialUnit",
+ "siunits.SICoherentUnit",
+ "siunits.SIPrefixedUnit",
+ "siunits.SIBaseUnit",
+ "siunits.SIUnitSymbol",
+ "siunits.SIUnit",
+ "emmo.MultipleUnit",
+ "emmo.SubMultipleUnit",
+ "emmo.OffSystemUnit",
+ "emmo.PrefixedUnit",
+ "emmo.NonPrefixedUnit",
+ "emmo.SpecialUnit",
+ "emmo.DerivedUnit",
+ "emmo.BaseUnit",
+ "emmo.UnitSymbol",
+ "emmo.SIAccepted",
+ "emmo.SICoherentDerivedUnit",
+ "emmo.SINonCoherentDerivedUnit",
+ "emmo.SISpecialUnit",
+ "emmo.SICoherentUnit",
+ "emmo.SIPrefixedUnit",
+ "emmo.SIBaseUnit",
+ "emmo.SIUnitSymbol",
+ "emmo.SIUnit",
+ )
+ )
+ if not hasattr(self.onto, "MeasurementUnit"):
+ return
+ exceptions.update(self.get_config("test_unit_dimension.exceptions", ()))
+ regex = re.compile(r"^(emmo|metrology).hasDimensionString.value\(.*\)$")
+ classes = set(self.onto.classes(self.check_imported))
+ for cls in self.onto.MeasurementUnit.descendants():
+ if not self.check_imported and cls not in classes:
+ continue
+ # Assume that actual units are not subclassed
+ if not list(cls.subclasses()) and repr(cls) not in exceptions:
+ with self.subTest(cls=cls, label=get_label(cls)):
+ self.assertTrue(
+ any(
+ regex.match(repr(r))
+ for r in cls.get_indirect_is_a()
+ ),
+ msg=cls,
+ )
+
+TestSyntacticEMMOConventions
+
+
+
+¶Test syntactic EMMO conventions.
+ +emmopy/emmocheck.py
class TestSyntacticEMMOConventions(TestEMMOConventions):
+ """Test syntactic EMMO conventions."""
+
+ def test_number_of_labels(self):
+ """Check that all entities have one and only one prefLabel.
+
+ Use "altLabel" for synonyms.
+
+ The only allowed exception is entities who's representation
+ starts with "owl.".
+ """
+ exceptions = set(
+ (
+ "terms.license",
+ "terms.abstract",
+ "terms.contributor",
+ "terms.creator",
+ "terms.publisher",
+ "terms.title",
+ "core.prefLabel",
+ "core.altLabel",
+ "core.hiddenLabel",
+ "foaf.logo",
+ "0.1.logo", # foaf.logo
+ )
+ )
+ exceptions.update(
+ self.get_config("test_number_of_labels.exceptions", ())
+ )
+ if (
+ "prefLabel"
+ in self.onto.world._props # pylint: disable=protected-access
+ ):
+ for entity in self.onto.classes(self.check_imported):
+ if repr(entity) not in exceptions:
+ with self.subTest(
+ entity=entity,
+ label=get_label(entity),
+ prefLabels=entity.prefLabel,
+ ):
+ if not repr(entity).startswith("owl."):
+ self.assertTrue(hasattr(entity, "prefLabel"))
+ self.assertEqual(1, len(entity.prefLabel))
+ else:
+ self.fail("ontology has no prefLabel")
+
+ def test_class_label(self):
+ """Check that class labels are CamelCase and valid identifiers.
+
+ For CamelCase, we are currently only checking that the labels
+ start with upper case.
+ """
+ exceptions = set(
+ (
+ "0-manifold", # not needed in 1.0.0-beta
+ "1-manifold",
+ "2-manifold",
+ "3-manifold",
+ "C++",
+ "3DPrinting",
+ )
+ )
+ exceptions.update(self.get_config("test_class_label.exceptions", ()))
+
+ for cls in self.onto.classes(self.check_imported):
+ for label in cls.label + getattr(cls, "prefLabel", []):
+ if str(label) not in exceptions:
+ with self.subTest(entity=cls, label=label):
+ self.assertTrue(label.isidentifier())
+ self.assertTrue(label[0].isupper())
+
+ def test_object_property_label(self):
+ """Check that object property labels are lowerCamelCase.
+
+ Allowed exceptions: "EMMORelation"
+
+ If they start with "has" or "is" they should be followed by a
+ upper case letter.
+
+ If they start with "is" they should also end with "Of".
+ """
+ exceptions = set(("EMMORelation",))
+ exceptions.update(
+ self.get_config("test_object_property_label.exceptions", ())
+ )
+
+ for obj_prop in self.onto.object_properties():
+ if repr(obj_prop) not in exceptions:
+ for label in obj_prop.label:
+ with self.subTest(entity=obj_prop, label=label):
+ self.assertTrue(
+ label[0].islower(), "label start with lowercase"
+ )
+ if label.startswith("has"):
+ self.assertTrue(
+ label[3].isupper(),
+ 'what follows "has" must be "uppercase"',
+ )
+ if label.startswith("is"):
+ self.assertTrue(
+ label[2].isupper(),
+ 'what follows "is" must be "uppercase"',
+ )
+ self.assertTrue(
+ label.endswith(("Of", "With")),
+ 'should end with "Of" or "With"',
+ )
+
test_class_label(self)
+
+
+¶Check that class labels are CamelCase and valid identifiers.
+For CamelCase, we are currently only checking that the labels +start with upper case.
+ +emmopy/emmocheck.py
def test_class_label(self):
+ """Check that class labels are CamelCase and valid identifiers.
+
+ For CamelCase, we are currently only checking that the labels
+ start with upper case.
+ """
+ exceptions = set(
+ (
+ "0-manifold", # not needed in 1.0.0-beta
+ "1-manifold",
+ "2-manifold",
+ "3-manifold",
+ "C++",
+ "3DPrinting",
+ )
+ )
+ exceptions.update(self.get_config("test_class_label.exceptions", ()))
+
+ for cls in self.onto.classes(self.check_imported):
+ for label in cls.label + getattr(cls, "prefLabel", []):
+ if str(label) not in exceptions:
+ with self.subTest(entity=cls, label=label):
+ self.assertTrue(label.isidentifier())
+ self.assertTrue(label[0].isupper())
+
test_number_of_labels(self)
+
+
+¶Check that all entities have one and only one prefLabel.
+Use "altLabel" for synonyms.
+The only allowed exception is entities who's representation +starts with "owl.".
+ +emmopy/emmocheck.py
def test_number_of_labels(self):
+ """Check that all entities have one and only one prefLabel.
+
+ Use "altLabel" for synonyms.
+
+ The only allowed exception is entities who's representation
+ starts with "owl.".
+ """
+ exceptions = set(
+ (
+ "terms.license",
+ "terms.abstract",
+ "terms.contributor",
+ "terms.creator",
+ "terms.publisher",
+ "terms.title",
+ "core.prefLabel",
+ "core.altLabel",
+ "core.hiddenLabel",
+ "foaf.logo",
+ "0.1.logo", # foaf.logo
+ )
+ )
+ exceptions.update(
+ self.get_config("test_number_of_labels.exceptions", ())
+ )
+ if (
+ "prefLabel"
+ in self.onto.world._props # pylint: disable=protected-access
+ ):
+ for entity in self.onto.classes(self.check_imported):
+ if repr(entity) not in exceptions:
+ with self.subTest(
+ entity=entity,
+ label=get_label(entity),
+ prefLabels=entity.prefLabel,
+ ):
+ if not repr(entity).startswith("owl."):
+ self.assertTrue(hasattr(entity, "prefLabel"))
+ self.assertEqual(1, len(entity.prefLabel))
+ else:
+ self.fail("ontology has no prefLabel")
+
test_object_property_label(self)
+
+
+¶Check that object property labels are lowerCamelCase.
+Allowed exceptions: "EMMORelation"
+If they start with "has" or "is" they should be followed by a +upper case letter.
+If they start with "is" they should also end with "Of".
+ +emmopy/emmocheck.py
def test_object_property_label(self):
+ """Check that object property labels are lowerCamelCase.
+
+ Allowed exceptions: "EMMORelation"
+
+ If they start with "has" or "is" they should be followed by a
+ upper case letter.
+
+ If they start with "is" they should also end with "Of".
+ """
+ exceptions = set(("EMMORelation",))
+ exceptions.update(
+ self.get_config("test_object_property_label.exceptions", ())
+ )
+
+ for obj_prop in self.onto.object_properties():
+ if repr(obj_prop) not in exceptions:
+ for label in obj_prop.label:
+ with self.subTest(entity=obj_prop, label=label):
+ self.assertTrue(
+ label[0].islower(), "label start with lowercase"
+ )
+ if label.startswith("has"):
+ self.assertTrue(
+ label[3].isupper(),
+ 'what follows "has" must be "uppercase"',
+ )
+ if label.startswith("is"):
+ self.assertTrue(
+ label[2].isupper(),
+ 'what follows "is" must be "uppercase"',
+ )
+ self.assertTrue(
+ label.endswith(("Of", "With")),
+ 'should end with "Of" or "With"',
+ )
+
main(argv=None)
+
+
+¶Run all checks on ontology iri
.
Default is 'http://emmo.info/emmo'.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
argv |
+ list |
+ List of arguments, similar to |
+ None |
+
emmopy/emmocheck.py
def main(
+ argv: list = None,
+): # pylint: disable=too-many-locals,too-many-branches,too-many-statements
+ """Run all checks on ontology `iri`.
+
+ Default is 'http://emmo.info/emmo'.
+
+ Parameters:
+ argv: List of arguments, similar to `sys.argv[1:]`.
+ Mainly for testing purposes, since it allows one to invoke the tool
+ manually / through Python.
+
+ """
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument("iri", help="File name or URI to the ontology to test.")
+ parser.add_argument(
+ "--database",
+ "-d",
+ metavar="FILENAME",
+ default=":memory:",
+ help=(
+ "Load ontology from Owlready2 sqlite3 database. The `iri` argument"
+ " should in this case be the IRI of the ontology you want to "
+ "check."
+ ),
+ )
+ parser.add_argument(
+ "--local",
+ "-l",
+ action="store_true",
+ help=(
+ "Load imported ontologies locally. Their paths are specified in "
+ "Protègè catalog files or via the --path option. The IRI should "
+ "be a file name."
+ ),
+ )
+ parser.add_argument(
+ "--catalog-file",
+ default="catalog-v001.xml",
+ help=(
+ "Name of Protègè catalog file in the same folder as the ontology. "
+ "This option is used together with --local and defaults to "
+ '"catalog-v001.xml".'
+ ),
+ )
+ parser.add_argument(
+ "--path",
+ action="append",
+ default=[],
+ help=(
+ "Paths where imported ontologies can be found. May be provided as "
+ "a comma-separated string and/or with multiple --path options."
+ ),
+ )
+ parser.add_argument(
+ "--check-imported",
+ "-i",
+ action="store_true",
+ help="Whether to check imported ontologies.",
+ )
+ parser.add_argument(
+ "--verbose", "-v", action="store_true", help="Verbosity level."
+ )
+ parser.add_argument(
+ "--configfile",
+ "-c",
+ help="A yaml file with additional test configurations.",
+ )
+ parser.add_argument(
+ "--skip",
+ "-s",
+ action="append",
+ default=[],
+ help=(
+ "Shell pattern matching tests to skip. This option may be "
+ "provided multiple times."
+ ),
+ )
+ parser.add_argument(
+ "--enable",
+ "-e",
+ action="append",
+ default=[],
+ help=(
+ "Shell pattern matching tests to enable that have been skipped by "
+ "default or in the config file. This option may be provided "
+ "multiple times."
+ ),
+ )
+ parser.add_argument( # deprecated, replaced by --no-catalog
+ "--url-from-catalog",
+ "-u",
+ default=None,
+ action="store_true",
+ help="Get url from catalog file",
+ )
+ parser.add_argument(
+ "--no-catalog",
+ action="store_false",
+ dest="url_from_catalog",
+ default=None,
+ help="Whether to not read catalog file even if it exists.",
+ )
+ parser.add_argument(
+ "--ignore-namespace",
+ "-n",
+ action="append",
+ default=[],
+ help="Namespace to be ignored. Can be given multiple times",
+ )
+
+ # Options to pass forward to unittest
+ parser.add_argument(
+ "--buffer",
+ "-b",
+ dest="unittest",
+ action="append_const",
+ const="-b",
+ help=(
+ "The standard output and standard error streams are buffered "
+ "during the test run. Output during a passing test is discarded. "
+ "Output is echoed normally on test fail or error and is added to "
+ "the failure messages."
+ ),
+ )
+ parser.add_argument(
+ "--catch",
+ dest="unittest",
+ action="append_const",
+ const="-c",
+ help=(
+ "Control-C during the test run waits for the current test to end "
+ "and then reports all the results so far. A second control-C "
+ "raises the normal KeyboardInterrupt exception"
+ ),
+ )
+ parser.add_argument(
+ "--failfast",
+ "-f",
+ dest="unittest",
+ action="append_const",
+ const="-f",
+ help="Stop the test run on the first error or failure.",
+ )
+ try:
+ args = parser.parse_args(args=argv)
+ sys.argv[1:] = args.unittest if args.unittest else []
+ if args.verbose:
+ sys.argv.append("-v")
+ except SystemExit as exc:
+ sys.exit(exc.code) # Exit without traceback on invalid arguments
+
+ # Append to onto_path
+ for paths in args.path:
+ for path in paths.split(","):
+ if path not in onto_path:
+ onto_path.append(path)
+
+ # Load ontology
+ world = World(filename=args.database)
+ if args.database != ":memory:" and args.iri not in world.ontologies:
+ parser.error(
+ "The IRI argument should be one of the ontologies in "
+ "the database:\n " + "\n ".join(world.ontologies.keys())
+ )
+
+ onto = world.get_ontology(args.iri)
+ onto.load(
+ only_local=args.local,
+ url_from_catalog=args.url_from_catalog,
+ catalog_file=args.catalog_file,
+ )
+
+ # Store settings TestEMMOConventions
+ TestEMMOConventions.onto = onto
+ TestEMMOConventions.check_imported = args.check_imported
+ TestEMMOConventions.ignore_namespace = args.ignore_namespace
+
+ # Configure tests
+ verbosity = 2 if args.verbose else 1
+ if args.configfile:
+ import yaml # pylint: disable=import-outside-toplevel
+
+ with open(args.configfile, "rt") as handle:
+ TestEMMOConventions.config.update(
+ yaml.load(handle, Loader=yaml.SafeLoader)
+ )
+
+ # Run all subclasses of TestEMMOConventions as test suites
+ status = 0
+ for cls in TestEMMOConventions.__subclasses__():
+ # pylint: disable=cell-var-from-loop,undefined-loop-variable
+
+ suite = unittest.TestLoader().loadTestsFromTestCase(cls)
+
+ # Mark tests to be skipped
+ for test in suite:
+ name = test.id().split(".")[-1]
+ skipped = set( # skipped by default
+ [
+ "test_namespace",
+ "test_physical_quantity_dimension_annotation",
+ "test_quantity_dimension_beta3",
+ "test_physical_quantity_dimension",
+ ]
+ )
+ msg = {name: "skipped by default" for name in skipped}
+
+ # enable/skip tests from config file
+ for pattern in test.get_config("enable", ()):
+ if fnmatch.fnmatchcase(name, pattern):
+ skipped.remove(name)
+ for pattern in test.get_config("skip", ()):
+ if fnmatch.fnmatchcase(name, pattern):
+ skipped.add(name)
+ msg[name] = "skipped from config file"
+
+ # enable/skip from command line
+ for pattern in args.enable:
+ if fnmatch.fnmatchcase(name, pattern):
+ skipped.remove(name)
+ for pattern in args.skip:
+ if fnmatch.fnmatchcase(name, pattern):
+ skipped.add(name)
+ msg[name] = "skipped from command line"
+
+ if name in skipped:
+ setattr(test, "setUp", lambda: test.skipTest(msg.get(name, "")))
+
+ runner = TextTestRunner(verbosity=verbosity)
+ runner.resultclass.checkmode = True
+ result = runner.run(suite)
+ if result.failures:
+ status = 1
+
+ return status
+
emmopy.emmopy
¶Automagically retrieve the EMMO utilizing
+ontopy.get_ontology
.
get_emmo(inferred=True)
+
+
+¶Returns the current version of emmo.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
inferred |
+ Optional[bool] |
+ Whether to import the inferred version of emmo or not. +Default is True. |
+ True |
+
Returns:
+Type | +Description | +
---|---|
Ontology |
+ The loaded emmo ontology. |
+
emmopy/emmopy.py
def get_emmo(inferred: Optional[bool] = True) -> "Ontology":
+ """Returns the current version of emmo.
+
+ Args:
+ inferred: Whether to import the inferred version of emmo or not.
+ Default is True.
+
+ Returns:
+ The loaded emmo ontology.
+
+ """
+ name = "emmo-inferred" if inferred in [True, None] else "emmo"
+ return get_ontology(name).load(prefix_emmo=True)
+
ontopy.colortest
¶Print tests in colors.
+Adapted from https://github.com/meshy/colour-runner by Charlie Denton +License: MIT
+ + + +
+ColourTextTestResult (TestResult)
+
+
+
+
+¶A test result class that prints colour formatted text results to a stream.
+Based on https://github.com/python/cpython/blob/3.3/Lib/unittest/runner.py
+ +ontopy/colortest.py
class ColourTextTestResult(TestResult):
+ """
+ A test result class that prints colour formatted text results to a stream.
+
+ Based on https://github.com/python/cpython/blob/3.3/Lib/unittest/runner.py
+ """
+
+ formatter = formatters.Terminal256Formatter() # pylint: disable=no-member
+ lexer = Lexer()
+ separator1 = "=" * 70
+ separator2 = "-" * 70
+ indent = " " * 4
+ # if `checkmode` is true, simplified output will be generated with
+ # no traceback
+ checkmode = False
+ _terminal = Terminal()
+ colours = {
+ None: str,
+ "error": _terminal.bold_red,
+ "expected": _terminal.blue,
+ # "fail": _terminal.bold_yellow,
+ "fail": _terminal.bold_magenta,
+ "skip": str,
+ "success": _terminal.green,
+ "title": _terminal.blue,
+ "unexpected": _terminal.bold_red,
+ }
+
+ _test_class = None
+
+ def __init__(self, stream, descriptions, verbosity):
+ super().__init__(stream, descriptions, verbosity)
+ self.stream = stream
+ self.show_all = verbosity > 1
+ self.dots = verbosity == 1
+ self.descriptions = descriptions
+
+ def getShortDescription(self, test):
+ doc_first_line = test.shortDescription()
+ if self.descriptions and doc_first_line:
+ return self.indent + doc_first_line
+ return self.indent + test._testMethodName
+
+ def getLongDescription(self, test):
+ doc_first_line = test.shortDescription()
+ if self.descriptions and doc_first_line:
+ return "\n".join((str(test), doc_first_line))
+ return str(test)
+
+ def getClassDescription(self, test):
+ test_class = test.__class__
+ doc = test_class.__doc__
+ if self.descriptions and doc:
+ return doc.split("\n")[0].strip()
+ return strclass(test_class)
+
+ def startTest(self, test):
+ super().startTest(test)
+ pos = 0
+ if self.show_all:
+ if self._test_class != test.__class__:
+ self._test_class = test.__class__
+ title = self.getClassDescription(test)
+ self.stream.writeln(self.colours["title"](title))
+ descr = self.getShortDescription(test)
+ self.stream.write(descr)
+ pos += len(descr)
+ self.stream.write(" " * (70 - pos))
+ # self.stream.write(' ' * (self._terminal.width - 10 - pos))
+ # self.stream.write(' ... ')
+ self.stream.flush()
+
+ def printResult(self, short, extended, colour_key=None):
+ colour = self.colours[colour_key]
+ if self.show_all:
+ self.stream.writeln(colour(extended))
+ elif self.dots:
+ self.stream.write(colour(short))
+ self.stream.flush()
+
+ def addSuccess(self, test):
+ super().addSuccess(test)
+ self.printResult(".", "ok", "success")
+
+ def addError(self, test, err):
+ super().addError(test, err)
+ self.printResult("E", "ERROR", "error")
+
+ def addFailure(self, test, err):
+ super().addFailure(test, err)
+ self.printResult("F", "FAIL", "fail")
+
+ def addSkip(self, test, reason):
+ super().addSkip(test, reason)
+ if self.checkmode:
+ self.printResult("s", "skipped", "skip")
+ else:
+ self.printResult("s", f"skipped {reason!r}", "skip")
+
+ def addExpectedFailure(self, test, err):
+ super().addExpectedFailure(test, err)
+ self.printResult("x", "expected failure", "expected")
+
+ def addUnexpectedSuccess(self, test):
+ super().addUnexpectedSuccess(test)
+ self.printResult("u", "unexpected success", "unexpected")
+
+ def printErrors(self):
+ if self.dots or self.show_all:
+ self.stream.writeln()
+ self.printErrorList("ERROR", self.errors)
+ self.printErrorList("FAIL", self.failures)
+
+ def printErrorList(self, flavour, errors):
+ colour = self.colours[flavour.lower()]
+
+ for test, err in errors:
+ if self.checkmode and flavour == "FAIL":
+ self.stream.writeln(self.separator1)
+ title = f"{flavour}: {test.shortDescription()}"
+ self.stream.writeln(colour(title))
+ self.stream.writeln(str(test))
+ if self.show_all:
+ self.stream.writeln(self.separator2)
+ lines = str(err).split("\n")
+ i = 1
+ for line in lines[1:]:
+ if line.startswith(" "):
+ i += 1
+ else:
+ break
+ self.stream.writeln(
+ highlight(
+ "\n".join(lines[i:]), self.lexer, self.formatter
+ )
+ )
+ else:
+ self.stream.writeln(self.separator1)
+ title = f"{flavour}: {self.getLongDescription(test)}"
+ self.stream.writeln(colour(title))
+ self.stream.writeln(self.separator2)
+ self.stream.writeln(highlight(err, self.lexer, self.formatter))
+
addError(self, test, err)
+
+
+¶Called when an error has occurred. 'err' is a tuple of values as +returned by sys.exc_info().
+ +ontopy/colortest.py
def addError(self, test, err):
+ super().addError(test, err)
+ self.printResult("E", "ERROR", "error")
+
addExpectedFailure(self, test, err)
+
+
+¶Called when an expected failure/error occurred.
+ +ontopy/colortest.py
def addExpectedFailure(self, test, err):
+ super().addExpectedFailure(test, err)
+ self.printResult("x", "expected failure", "expected")
+
addFailure(self, test, err)
+
+
+¶Called when an error has occurred. 'err' is a tuple of values as +returned by sys.exc_info().
+ +ontopy/colortest.py
def addFailure(self, test, err):
+ super().addFailure(test, err)
+ self.printResult("F", "FAIL", "fail")
+
addSkip(self, test, reason)
+
+
+¶Called when a test is skipped.
+ +ontopy/colortest.py
def addSkip(self, test, reason):
+ super().addSkip(test, reason)
+ if self.checkmode:
+ self.printResult("s", "skipped", "skip")
+ else:
+ self.printResult("s", f"skipped {reason!r}", "skip")
+
addSuccess(self, test)
+
+
+¶Called when a test has completed successfully
+ +ontopy/colortest.py
def addSuccess(self, test):
+ super().addSuccess(test)
+ self.printResult(".", "ok", "success")
+
addUnexpectedSuccess(self, test)
+
+
+¶Called when a test was expected to fail, but succeed.
+ +ontopy/colortest.py
def addUnexpectedSuccess(self, test):
+ super().addUnexpectedSuccess(test)
+ self.printResult("u", "unexpected success", "unexpected")
+
printErrors(self)
+
+
+¶Called by TestRunner after test run
+ +ontopy/colortest.py
def printErrors(self):
+ if self.dots or self.show_all:
+ self.stream.writeln()
+ self.printErrorList("ERROR", self.errors)
+ self.printErrorList("FAIL", self.failures)
+
startTest(self, test)
+
+
+¶Called when the given test is about to be run
+ +ontopy/colortest.py
def startTest(self, test):
+ super().startTest(test)
+ pos = 0
+ if self.show_all:
+ if self._test_class != test.__class__:
+ self._test_class = test.__class__
+ title = self.getClassDescription(test)
+ self.stream.writeln(self.colours["title"](title))
+ descr = self.getShortDescription(test)
+ self.stream.write(descr)
+ pos += len(descr)
+ self.stream.write(" " * (70 - pos))
+ # self.stream.write(' ' * (self._terminal.width - 10 - pos))
+ # self.stream.write(' ... ')
+ self.stream.flush()
+
+ColourTextTestRunner (TextTestRunner)
+
+
+
+
+¶A test runner that uses colour in its output.
+ +ontopy/colortest.py
class ColourTextTestRunner(
+ TextTestRunner
+): # pylint: disable=too-few-public-methods
+ """A test runner that uses colour in its output."""
+
+ resultclass = ColourTextTestResult
+
+resultclass (TestResult)
+
+
+
+
+¶A test result class that prints colour formatted text results to a stream.
+Based on https://github.com/python/cpython/blob/3.3/Lib/unittest/runner.py
+ +ontopy/colortest.py
class ColourTextTestResult(TestResult):
+ """
+ A test result class that prints colour formatted text results to a stream.
+
+ Based on https://github.com/python/cpython/blob/3.3/Lib/unittest/runner.py
+ """
+
+ formatter = formatters.Terminal256Formatter() # pylint: disable=no-member
+ lexer = Lexer()
+ separator1 = "=" * 70
+ separator2 = "-" * 70
+ indent = " " * 4
+ # if `checkmode` is true, simplified output will be generated with
+ # no traceback
+ checkmode = False
+ _terminal = Terminal()
+ colours = {
+ None: str,
+ "error": _terminal.bold_red,
+ "expected": _terminal.blue,
+ # "fail": _terminal.bold_yellow,
+ "fail": _terminal.bold_magenta,
+ "skip": str,
+ "success": _terminal.green,
+ "title": _terminal.blue,
+ "unexpected": _terminal.bold_red,
+ }
+
+ _test_class = None
+
+ def __init__(self, stream, descriptions, verbosity):
+ super().__init__(stream, descriptions, verbosity)
+ self.stream = stream
+ self.show_all = verbosity > 1
+ self.dots = verbosity == 1
+ self.descriptions = descriptions
+
+ def getShortDescription(self, test):
+ doc_first_line = test.shortDescription()
+ if self.descriptions and doc_first_line:
+ return self.indent + doc_first_line
+ return self.indent + test._testMethodName
+
+ def getLongDescription(self, test):
+ doc_first_line = test.shortDescription()
+ if self.descriptions and doc_first_line:
+ return "\n".join((str(test), doc_first_line))
+ return str(test)
+
+ def getClassDescription(self, test):
+ test_class = test.__class__
+ doc = test_class.__doc__
+ if self.descriptions and doc:
+ return doc.split("\n")[0].strip()
+ return strclass(test_class)
+
+ def startTest(self, test):
+ super().startTest(test)
+ pos = 0
+ if self.show_all:
+ if self._test_class != test.__class__:
+ self._test_class = test.__class__
+ title = self.getClassDescription(test)
+ self.stream.writeln(self.colours["title"](title))
+ descr = self.getShortDescription(test)
+ self.stream.write(descr)
+ pos += len(descr)
+ self.stream.write(" " * (70 - pos))
+ # self.stream.write(' ' * (self._terminal.width - 10 - pos))
+ # self.stream.write(' ... ')
+ self.stream.flush()
+
+ def printResult(self, short, extended, colour_key=None):
+ colour = self.colours[colour_key]
+ if self.show_all:
+ self.stream.writeln(colour(extended))
+ elif self.dots:
+ self.stream.write(colour(short))
+ self.stream.flush()
+
+ def addSuccess(self, test):
+ super().addSuccess(test)
+ self.printResult(".", "ok", "success")
+
+ def addError(self, test, err):
+ super().addError(test, err)
+ self.printResult("E", "ERROR", "error")
+
+ def addFailure(self, test, err):
+ super().addFailure(test, err)
+ self.printResult("F", "FAIL", "fail")
+
+ def addSkip(self, test, reason):
+ super().addSkip(test, reason)
+ if self.checkmode:
+ self.printResult("s", "skipped", "skip")
+ else:
+ self.printResult("s", f"skipped {reason!r}", "skip")
+
+ def addExpectedFailure(self, test, err):
+ super().addExpectedFailure(test, err)
+ self.printResult("x", "expected failure", "expected")
+
+ def addUnexpectedSuccess(self, test):
+ super().addUnexpectedSuccess(test)
+ self.printResult("u", "unexpected success", "unexpected")
+
+ def printErrors(self):
+ if self.dots or self.show_all:
+ self.stream.writeln()
+ self.printErrorList("ERROR", self.errors)
+ self.printErrorList("FAIL", self.failures)
+
+ def printErrorList(self, flavour, errors):
+ colour = self.colours[flavour.lower()]
+
+ for test, err in errors:
+ if self.checkmode and flavour == "FAIL":
+ self.stream.writeln(self.separator1)
+ title = f"{flavour}: {test.shortDescription()}"
+ self.stream.writeln(colour(title))
+ self.stream.writeln(str(test))
+ if self.show_all:
+ self.stream.writeln(self.separator2)
+ lines = str(err).split("\n")
+ i = 1
+ for line in lines[1:]:
+ if line.startswith(" "):
+ i += 1
+ else:
+ break
+ self.stream.writeln(
+ highlight(
+ "\n".join(lines[i:]), self.lexer, self.formatter
+ )
+ )
+ else:
+ self.stream.writeln(self.separator1)
+ title = f"{flavour}: {self.getLongDescription(test)}"
+ self.stream.writeln(colour(title))
+ self.stream.writeln(self.separator2)
+ self.stream.writeln(highlight(err, self.lexer, self.formatter))
+
addError(self, test, err)
+
+
+¶Called when an error has occurred. 'err' is a tuple of values as +returned by sys.exc_info().
+ +ontopy/colortest.py
def addError(self, test, err):
+ super().addError(test, err)
+ self.printResult("E", "ERROR", "error")
+
addExpectedFailure(self, test, err)
+
+
+¶Called when an expected failure/error occurred.
+ +ontopy/colortest.py
def addExpectedFailure(self, test, err):
+ super().addExpectedFailure(test, err)
+ self.printResult("x", "expected failure", "expected")
+
addFailure(self, test, err)
+
+
+¶Called when an error has occurred. 'err' is a tuple of values as +returned by sys.exc_info().
+ +ontopy/colortest.py
def addFailure(self, test, err):
+ super().addFailure(test, err)
+ self.printResult("F", "FAIL", "fail")
+
addSkip(self, test, reason)
+
+
+¶Called when a test is skipped.
+ +ontopy/colortest.py
def addSkip(self, test, reason):
+ super().addSkip(test, reason)
+ if self.checkmode:
+ self.printResult("s", "skipped", "skip")
+ else:
+ self.printResult("s", f"skipped {reason!r}", "skip")
+
addSuccess(self, test)
+
+
+¶Called when a test has completed successfully
+ +ontopy/colortest.py
def addSuccess(self, test):
+ super().addSuccess(test)
+ self.printResult(".", "ok", "success")
+
addUnexpectedSuccess(self, test)
+
+
+¶Called when a test was expected to fail, but succeed.
+ +ontopy/colortest.py
def addUnexpectedSuccess(self, test):
+ super().addUnexpectedSuccess(test)
+ self.printResult("u", "unexpected success", "unexpected")
+
printErrors(self)
+
+
+¶Called by TestRunner after test run
+ +ontopy/colortest.py
def printErrors(self):
+ if self.dots or self.show_all:
+ self.stream.writeln()
+ self.printErrorList("ERROR", self.errors)
+ self.printErrorList("FAIL", self.failures)
+
startTest(self, test)
+
+
+¶Called when the given test is about to be run
+ +ontopy/colortest.py
def startTest(self, test):
+ super().startTest(test)
+ pos = 0
+ if self.show_all:
+ if self._test_class != test.__class__:
+ self._test_class = test.__class__
+ title = self.getClassDescription(test)
+ self.stream.writeln(self.colours["title"](title))
+ descr = self.getShortDescription(test)
+ self.stream.write(descr)
+ pos += len(descr)
+ self.stream.write(" " * (70 - pos))
+ # self.stream.write(' ' * (self._terminal.width - 10 - pos))
+ # self.stream.write(' ... ')
+ self.stream.flush()
+
Module from parsing an excelfile and creating an +ontology from it.
+The excelfile is read by pandas and the pandas +dataframe should have column names: +prefLabel, altLabel, Elucidation, Comments, Examples, +subClassOf, Relations.
+Note that correct case is mandatory.
+ + + +
+ExcelError (EMMOntoPyException)
+
+
+
+
+¶Raised on errors in Excel file.
+ +ontopy/excelparser.py
class ExcelError(EMMOntoPyException):
+ """Raised on errors in Excel file."""
+
create_ontology_from_excel(excelpath, concept_sheet_name='Concepts', metadata_sheet_name='Metadata', imports_sheet_name='ImportedOntologies', dataproperties_sheet_name='DataProperties', objectproperties_sheet_name='ObjectProperties', annotationproperties_sheet_name='AnnotationProperties', base_iri='http://emmo.info/emmo/domain/onto#', base_iri_from_metadata=True, imports=None, catalog=None, force=False, input_ontology=None)
+
+
+¶Creates an ontology from an Excel-file.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
excelpath |
+ str |
+ Path to Excel workbook. |
+ required | +
concept_sheet_name |
+ str |
+ Name of sheet where concepts are defined. +The second row of this sheet should contain column names that are +supported. Currently these are 'prefLabel','altLabel', +'Elucidation', 'Comments', 'Examples', 'subClassOf', 'Relations'. +Multiple entries are separated with ';'. |
+ 'Concepts' |
+
metadata_sheet_name |
+ str |
+ Name of sheet where metadata are defined.
+The first row contains column names 'Metadata name' and 'Value'
+Supported 'Metadata names' are: 'Ontology IRI',
+'Ontology vesion IRI', 'Ontology version Info', 'Title',
+'Abstract', 'License', 'Comment', 'Author', 'Contributor'.
+Multiple entries are separated with a semi-colon ( |
+ 'Metadata' |
+
imports_sheet_name |
+ str |
+ Name of sheet where imported ontologies are +defined. +Column name is 'Imported ontologies'. +Fully resolvable URL or path to imported ontologies provided one +per row. |
+ 'ImportedOntologies' |
+
dataproperties_sheet_name |
+ str |
+ Name of sheet where data properties are +defined. The second row of this sheet should contain column names +that are supported. Currently these are 'prefLabel','altLabel', +'Elucidation', 'Comments', 'Examples', 'subPropertyOf', +'Domain', 'Range', 'dijointWith', 'equivalentTo'. |
+ 'DataProperties' |
+
annotationproperties_sheet_name |
+ str |
+ Name of sheet where annotation +properties are defined. The second row of this sheet should contain +column names that are supported. Currently these are 'prefLabel', +'altLabel', 'Elucidation', 'Comments', 'Examples', 'subPropertyOf', +'Domain', 'Range'. |
+ 'AnnotationProperties' |
+
objectproperties_sheet_name |
+ str |
+ Name of sheet where object properties are +defined.The second row of this sheet should contain column names +that are supported. Currently these are 'prefLabel','altLabel', +'Elucidation', 'Comments', 'Examples', 'subPropertyOf', +'Domain', 'Range', 'inverseOf', 'dijointWith', 'equivalentTo'. |
+ 'ObjectProperties' |
+
base_iri |
+ str |
+ Base IRI of the new ontology. |
+ 'http://emmo.info/emmo/domain/onto#' |
+
base_iri_from_metadata |
+ bool |
+ Whether to use base IRI defined from metadata. |
+ True |
+
imports |
+ list |
+ List of imported ontologies. |
+ None |
+
catalog |
+ dict |
+ Imported ontologies with (name, full path) key/value-pairs. |
+ None |
+
force |
+ bool |
+ Forcibly make an ontology by skipping concepts +that are erroneously defined or other errors in the excel sheet. |
+ False |
+
input_ontology |
+ Optional[ontopy.ontology.Ontology] |
+ Ontology that should be updated. +Default is None, +which means that a completely new ontology is generated. +If an input_ontology to be updated is provided, +the metadata sheet in the excel sheet will not be considered. |
+ None |
+
Returns:
+Type | +Description | +
---|---|
A tuple with the |
+
|
+
ontopy/excelparser.py
def create_ontology_from_excel( # pylint: disable=too-many-arguments, too-many-locals
+ excelpath: str,
+ concept_sheet_name: str = "Concepts",
+ metadata_sheet_name: str = "Metadata",
+ imports_sheet_name: str = "ImportedOntologies",
+ dataproperties_sheet_name: str = "DataProperties",
+ objectproperties_sheet_name: str = "ObjectProperties",
+ annotationproperties_sheet_name: str = "AnnotationProperties",
+ base_iri: str = "http://emmo.info/emmo/domain/onto#",
+ base_iri_from_metadata: bool = True,
+ imports: list = None,
+ catalog: dict = None,
+ force: bool = False,
+ input_ontology: Union[ontopy.ontology.Ontology, None] = None,
+) -> Tuple[ontopy.ontology.Ontology, dict, dict]:
+ """
+ Creates an ontology from an Excel-file.
+
+ Arguments:
+ excelpath: Path to Excel workbook.
+ concept_sheet_name: Name of sheet where concepts are defined.
+ The second row of this sheet should contain column names that are
+ supported. Currently these are 'prefLabel','altLabel',
+ 'Elucidation', 'Comments', 'Examples', 'subClassOf', 'Relations'.
+ Multiple entries are separated with ';'.
+ metadata_sheet_name: Name of sheet where metadata are defined.
+ The first row contains column names 'Metadata name' and 'Value'
+ Supported 'Metadata names' are: 'Ontology IRI',
+ 'Ontology vesion IRI', 'Ontology version Info', 'Title',
+ 'Abstract', 'License', 'Comment', 'Author', 'Contributor'.
+ Multiple entries are separated with a semi-colon (`;`).
+ imports_sheet_name: Name of sheet where imported ontologies are
+ defined.
+ Column name is 'Imported ontologies'.
+ Fully resolvable URL or path to imported ontologies provided one
+ per row.
+ dataproperties_sheet_name: Name of sheet where data properties are
+ defined. The second row of this sheet should contain column names
+ that are supported. Currently these are 'prefLabel','altLabel',
+ 'Elucidation', 'Comments', 'Examples', 'subPropertyOf',
+ 'Domain', 'Range', 'dijointWith', 'equivalentTo'.
+ annotationproperties_sheet_name: Name of sheet where annotation
+ properties are defined. The second row of this sheet should contain
+ column names that are supported. Currently these are 'prefLabel',
+ 'altLabel', 'Elucidation', 'Comments', 'Examples', 'subPropertyOf',
+ 'Domain', 'Range'.
+ objectproperties_sheet_name: Name of sheet where object properties are
+        defined. The second row of this sheet should contain column names
+ that are supported. Currently these are 'prefLabel','altLabel',
+ 'Elucidation', 'Comments', 'Examples', 'subPropertyOf',
+ 'Domain', 'Range', 'inverseOf', 'dijointWith', 'equivalentTo'.
+ base_iri: Base IRI of the new ontology.
+ base_iri_from_metadata: Whether to use base IRI defined from metadata.
+ imports: List of imported ontologies.
+ catalog: Imported ontologies with (name, full path) key/value-pairs.
+ force: Forcibly make an ontology by skipping concepts
+ that are erroneously defined or other errors in the excel sheet.
+ input_ontology: Ontology that should be updated.
+ Default is None,
+ which means that a completely new ontology is generated.
+ If an input_ontology to be updated is provided,
+ the metadata sheet in the excel sheet will not be considered.
+
+
+ Returns:
+ A tuple with the:
+
+ * created ontology
+ * associated catalog of ontology names and resolvable path as dict
+ * a dictionary with lists of concepts that raise errors, with the
+ following keys:
+
+ - "already_defined": These are concepts (classes)
+ that are already in the
+ ontology, because they were already added in a
+ previous line of the excelfile/pandas dataframe, or because
+ it is already defined in an imported ontology with the same
+ base_iri as the newly created ontology.
+ - "in_imported_ontologies": Concepts (classes)
+ that are defined in the
+ excel, but already exist in the imported ontologies.
+ - "wrongly_defined": Concepts (classes) that are given an
+ invalid prefLabel (e.g. with a space in the name).
+ - "missing_subClassOf": Concepts (classes) that are missing
+ parents. These concepts are added directly under owl:Thing.
+ - "invalid_subClassOf": Concepts (classes) with invalidly
+ defined parents.
+ These concepts are added directly under owl:Thing.
+ - "nonadded_concepts": List of all concepts (classes) that are
+ not added,
+ either because the prefLabel is invalid, or because the
+ concept has already been added once or already exists in an
+ imported ontology.
+ - "obj_prop_already_defined": Object properties that are already
+ defined in the ontology.
+ - "obj_prop_in_imported_ontologies": Object properties that are
+ defined in the excel, but already exist in the imported
+ ontologies.
+ - "obj_prop_wrongly_defined": Object properties that are given
+ an invalid prefLabel (e.g. with a space in the name).
+ - "obj_prop_missing_subPropertyOf": Object properties that are
+ missing parents.
+ - "obj_prop_invalid_subPropertyOf": Object properties with
+ invalidly defined parents.
+ - "obj_prop_nonadded_entities": List of all object properties
+ that are not added, either because the prefLabel is invalid,
+ or because the concept has already been added once or
+ already exists in an imported ontology.
+ - "obj_prop_errors_in_properties": Object properties with
+ invalidly defined properties.
+ - "obj_prop_errors_in_range": Object properties with invalidly
+ defined range.
+ - "obj_prop_errors_in_domain": Object properties with invalidly
+ defined domain.
+ - "annot_prop_already_defined": Annotation properties that are
+ already defined in the ontology.
+ - "annot_prop_in_imported_ontologies": Annotation properties
+ that
+ are defined in the excel, but already exist in the imported
+ ontologies.
+ - "annot_prop_wrongly_defined": Annotation properties that are
+ given an invalid prefLabel (e.g. with a space in the name).
+ - "annot_prop_missing_subPropertyOf": Annotation properties that
+ are missing parents.
+ - "annot_prop_invalid_subPropertyOf": Annotation properties with
+ invalidly defined parents.
+ - "annot_prop_nonadded_entities": List of all annotation
+ properties that are not added, either because the prefLabel
+ is invalid, or because the concept has already been added
+ once or already exists in an imported ontology.
+ - "annot_prop_errors_in_properties": Annotation properties with
+ invalidly defined properties.
+ - "data_prop_already_defined": Data properties that are already
+ defined in the ontology.
+ - "data_prop_in_imported_ontologies": Data properties that are
+ defined in the excel, but already exist in the imported
+ ontologies.
+ - "data_prop_wrongly_defined": Data properties that are given
+ an invalid prefLabel (e.g. with a space in the name).
+ - "data_prop_missing_subPropertyOf": Data properties that are
+ missing parents.
+ - "data_prop_invalid_subPropertyOf": Data properties with
+ invalidly defined parents.
+ - "data_prop_nonadded_entities": List of all data properties
+ that are not added, either because the prefLabel is invalid,
+ or because the concept has already been added once or
+ already exists in an imported ontology.
+ - "data_prop_errors_in_properties": Data properties with
+ invalidly defined properties.
+ - "data_prop_errors_in_range": Data properties with invalidly
+ defined range.
+ - "data_prop_errors_in_domain": Data properties with invalidly
+ defined domain.
+
+ """
+ web_protocol = "http://", "https://", "ftp://"
+
+ def _relative_to_absolute_paths(path):
+ if isinstance(path, str):
+ if not path.startswith(web_protocol):
+ path = os.path.dirname(excelpath) + "/" + str(path)
+ return path
+
+ try:
+ imports = pd.read_excel(
+ excelpath, sheet_name=imports_sheet_name, skiprows=[1]
+ )
+ except ValueError:
+ imports = pd.DataFrame()
+ else:
+ # Strip leading and trailing white spaces in paths
+ imports.replace(r"^\s+", "", regex=True).replace(
+ r"\s+$", "", regex=True
+ )
+ # Set empty strings to nan
+ imports = imports.replace(r"^\s*$", np.nan, regex=True)
+ if "Imported ontologies" in imports.columns:
+ imports["Imported ontologies"] = imports[
+ "Imported ontologies"
+ ].apply(_relative_to_absolute_paths)
+
+ # Read datafile TODO: Some magic to identify the header row
+ conceptdata = pd.read_excel(
+ excelpath, sheet_name=concept_sheet_name, skiprows=[0, 2]
+ )
+ try:
+ objectproperties = pd.read_excel(
+ excelpath, sheet_name=objectproperties_sheet_name, skiprows=[0, 2]
+ )
+ if "prefLabel" not in objectproperties.columns:
+ warnings.warn(
+ "The 'prefLabel' column is missing in "
+ f"{objectproperties_sheet_name}. "
+ "New object properties will not be added to the ontology."
+ )
+ objectproperties = None
+ except ValueError:
+ warnings.warn(
+ f"No sheet named {objectproperties_sheet_name} found "
+ f"in {excelpath}. "
+ "New object properties will not be added to the ontology."
+ )
+ objectproperties = None
+ try:
+ annotationproperties = pd.read_excel(
+ excelpath,
+ sheet_name=annotationproperties_sheet_name,
+ skiprows=[0, 2],
+ )
+ if "prefLabel" not in annotationproperties.columns:
+ warnings.warn(
+ "The 'prefLabel' column is missing in "
+ f"{annotationproperties_sheet_name}. "
+ "New annotation properties will not be added to the ontology."
+ )
+ annotationproperties = None
+ except ValueError:
+ warnings.warn(
+ f"No sheet named {annotationproperties_sheet_name} "
+ f"found in {excelpath}. "
+ "New annotation properties will not be added to the ontology."
+ )
+ annotationproperties = None
+
+ try:
+ dataproperties = pd.read_excel(
+ excelpath, sheet_name=dataproperties_sheet_name, skiprows=[0, 2]
+ )
+ if "prefLabel" not in dataproperties.columns:
+ warnings.warn(
+ "The 'prefLabel' column is missing in "
+ f"{dataproperties_sheet_name}. "
+ "New data properties will not be added to the ontology."
+ )
+ dataproperties = None
+ except ValueError:
+ warnings.warn(
+ f"No sheet named {dataproperties_sheet_name} found in {excelpath}. "
+ "New data properties will not be added to the ontology."
+ )
+ dataproperties = None
+
+ metadata = pd.read_excel(excelpath, sheet_name=metadata_sheet_name)
+ return create_ontology_from_pandas(
+ data=conceptdata,
+ objectproperties=objectproperties,
+ dataproperties=dataproperties,
+ annotationproperties=annotationproperties,
+ metadata=metadata,
+ imports=imports,
+ base_iri=base_iri,
+ base_iri_from_metadata=base_iri_from_metadata,
+ catalog=catalog,
+ force=force,
+ input_ontology=input_ontology,
+ )
+
create_ontology_from_pandas(data, objectproperties, annotationproperties, dataproperties, metadata, imports, base_iri='http://emmo.info/emmo/domain/onto#', base_iri_from_metadata=True, catalog=None, force=False, input_ontology=None)
+
+
+¶Create an ontology from a pandas DataFrame.
+Check 'create_ontology_from_excel' for complete documentation.
+ +ontopy/excelparser.py
def create_ontology_from_pandas( # pylint:disable=too-many-locals,too-many-branches,too-many-statements,too-many-arguments
+ data: pd.DataFrame,
+ objectproperties: pd.DataFrame,
+ annotationproperties: pd.DataFrame,
+ dataproperties: pd.DataFrame,
+ metadata: pd.DataFrame,
+ imports: pd.DataFrame,
+ base_iri: str = "http://emmo.info/emmo/domain/onto#",
+ base_iri_from_metadata: bool = True,
+ catalog: dict = None,
+ force: bool = False,
+ input_ontology: Union[ontopy.ontology.Ontology, None] = None,
+) -> Tuple[ontopy.ontology.Ontology, dict]:
+ """
+ Create an ontology from a pandas DataFrame.
+
+ Check 'create_ontology_from_excel' for complete documentation.
+ """
+ # Get ontology to which new concepts should be added
+ if input_ontology:
+ onto = input_ontology
+ catalog = {}
+ else: # Create new ontology
+ onto, catalog = get_metadata_from_dataframe(
+ metadata, base_iri, imports=imports
+ )
+
+ # Set given or default base_iri if base_iri_from_metadata is False.
+ if not base_iri_from_metadata:
+ onto.base_iri = base_iri
+ # onto.sync_python_names()
+ # prefLabel, label, and altLabel
+ # are default label annotations
+ onto.set_default_label_annotations()
+ # Add object properties
+ if objectproperties is not None:
+ objectproperties = _clean_dataframe(objectproperties)
+ (
+ onto,
+ objectproperties_with_errors,
+ added_objprop_indices,
+ ) = _add_entities(
+ onto=onto,
+ data=objectproperties,
+ entitytype=owlready2.ObjectPropertyClass,
+ force=force,
+ )
+
+ if annotationproperties is not None:
+ annotationproperties = _clean_dataframe(annotationproperties)
+ (
+ onto,
+ annotationproperties_with_errors,
+ added_annotprop_indices,
+ ) = _add_entities(
+ onto=onto,
+ data=annotationproperties,
+ entitytype=owlready2.AnnotationPropertyClass,
+ force=force,
+ )
+
+ if dataproperties is not None:
+ dataproperties = _clean_dataframe(dataproperties)
+ (
+ onto,
+ dataproperties_with_errors,
+ added_dataprop_indices,
+ ) = _add_entities(
+ onto=onto,
+ data=dataproperties,
+ entitytype=owlready2.DataPropertyClass,
+ force=force,
+ )
+ onto.sync_attributes(
+ name_policy="uuid", name_prefix="EMMO_", class_docstring="elucidation"
+ )
+ # Clean up data frame with new concepts
+ data = _clean_dataframe(data)
+ # Add entities
+ onto, entities_with_errors, added_concept_indices = _add_entities(
+ onto=onto, data=data, entitytype=owlready2.ThingClass, force=force
+ )
+
+ # Add entity properties in a second loop
+ for index in added_concept_indices:
+ row = data.loc[index]
+ properties = row["Relations"]
+ if properties == "nan":
+ properties = None
+ if isinstance(properties, str):
+ try:
+ entity = onto.get_by_label(row["prefLabel"].strip())
+ except NoSuchLabelError:
+ pass
+ props = properties.split(";")
+ for prop in props:
+ try:
+ entity.is_a.append(evaluate(onto, prop.strip()))
+ except pyparsing.ParseException as exc:
+ warnings.warn(
+ # This is currently not tested
+ f"Error in Property assignment for: '{entity}'. "
+ f"Property to be Evaluated: '{prop}'. "
+ f"{exc}"
+ )
+ entities_with_errors["errors_in_properties"].append(
+ entity.name
+ )
+ except NoSuchLabelError as exc:
+ msg = (
+ f"Error in Property assignment for: {entity}. "
+ f"Property to be Evaluated: {prop}. "
+ f"{exc}"
+ )
+ if force is True:
+ warnings.warn(msg)
+ entities_with_errors["errors_in_properties"].append(
+ entity.name
+ )
+ else:
+ raise ExcelError(msg) from exc
+
+ # Add range and domain for object properties
+ if objectproperties is not None:
+ onto, objectproperties_with_errors = _add_range_domain(
+ onto=onto,
+ properties=objectproperties,
+ added_prop_indices=added_objprop_indices,
+ properties_with_errors=objectproperties_with_errors,
+ force=force,
+ )
+ for key, value in objectproperties_with_errors.items():
+ entities_with_errors["obj_prop_" + key] = value
+ # Add range and domain for annotation properties
+ if annotationproperties is not None:
+ onto, annotationproperties_with_errors = _add_range_domain(
+ onto=onto,
+ properties=annotationproperties,
+ added_prop_indices=added_annotprop_indices,
+ properties_with_errors=annotationproperties_with_errors,
+ force=force,
+ )
+ for key, value in annotationproperties_with_errors.items():
+ entities_with_errors["annot_prop_" + key] = value
+
+ # Add range and domain for data properties
+ if dataproperties is not None:
+ onto, dataproperties_with_errors = _add_range_domain(
+ onto=onto,
+ properties=dataproperties,
+ added_prop_indices=added_dataprop_indices,
+ properties_with_errors=dataproperties_with_errors,
+ force=force,
+ )
+ for key, value in dataproperties_with_errors.items():
+ entities_with_errors["data_prop_" + key] = value
+
+ # Synchronise Python attributes to ontology
+ onto.sync_attributes(
+ name_policy="uuid", name_prefix="EMMO_", class_docstring="elucidation"
+ )
+ onto.dir_label = False
+ entities_with_errors = {
+ key: set(value) for key, value in entities_with_errors.items()
+ }
+ return onto, catalog, entities_with_errors
+
get_metadata_from_dataframe(metadata, base_iri, base_iri_from_metadata=True, imports=None, catalog=None)
+
+
+¶Create ontology with metadata from pd.DataFrame
+ +ontopy/excelparser.py
def get_metadata_from_dataframe( # pylint: disable=too-many-locals,too-many-branches,too-many-statements
+ metadata: pd.DataFrame,
+ base_iri: str,
+ base_iri_from_metadata: bool = True,
+ imports: pd.DataFrame = None,
+ catalog: dict = None,
+) -> Tuple[ontopy.ontology.Ontology, dict]:
+ """Create ontology with metadata from pd.DataFrame"""
+
+ # base_iri from metadata if it exists and base_iri_from_metadata
+ if base_iri_from_metadata:
+ try:
+ base_iris = _parse_literal(metadata, "Ontology IRI", metadata=True)
+ if len(base_iris) > 1:
+ warnings.warn(
+ "More than one Ontology IRI given. The first was chosen."
+ )
+ base_iri = base_iris[0] + "#"
+ except (TypeError, ValueError, AttributeError, IndexError):
+ pass
+
+ # Create new ontology
+ onto = get_ontology(base_iri)
+
+ # Add imported ontologies
+ catalog = {} if catalog is None else catalog
+ locations = set()
+ for _, row in imports.iterrows():
+ # for location in imports:
+ location = row["Imported ontologies"]
+ if not pd.isna(location) and location not in locations:
+ imported = onto.world.get_ontology(location).load()
+ onto.imported_ontologies.append(imported)
+ catalog[imported.base_iri.rstrip("#/")] = location
+ try:
+ cat = read_catalog(location.rsplit("/", 1)[0])
+ catalog.update(cat)
+ except ReadCatalogError:
+ warnings.warn(f"Catalog for {imported} not found.")
+ locations.add(location)
+ # set defined prefix
+ if not pd.isna(row["prefix"]):
+ # set prefix for all ontologies with same 'base_iri_root'
+ if not pd.isna(row["base_iri_root"]):
+ onto.set_common_prefix(
+ iri_base=row["base_iri_root"], prefix=row["prefix"]
+ )
+ # If base_root not given, set prefix only to top ontology
+ else:
+ imported.prefix = row["prefix"]
+
+ with onto:
+ # Add title
+ try:
+ _add_literal(
+ metadata,
+ onto.metadata.title,
+ "Title",
+ metadata=True,
+ only_one=True,
+ )
+ except AttributeError:
+ pass
+
+ # Add license
+ try:
+ _add_literal(
+ metadata, onto.metadata.license, "License", metadata=True
+ )
+ except AttributeError:
+ pass
+
+ # Add authors/creators
+ try:
+ _add_literal(
+ metadata, onto.metadata.creator, "Author", metadata=True
+ )
+ except AttributeError:
+ pass
+
+ # Add contributors
+ try:
+ _add_literal(
+ metadata,
+ onto.metadata.contributor,
+ "Contributor",
+ metadata=True,
+ )
+ except AttributeError:
+ pass
+
+ # Add versionInfo
+ try:
+ _add_literal(
+ metadata,
+ onto.metadata.versionInfo,
+ "Ontology version Info",
+ metadata=True,
+ only_one=True,
+ )
+ except AttributeError:
+ pass
+ return onto, catalog
+
ontopy.factpluspluswrapper.factppgraph
¶
+FaCTPPGraph
+
+
+
+¶Class for running the FaCT++ reasoner (using OwlApiInterface) and +postprocessing the resulting inferred ontology.
+graph : owlapi.Graph instance + The graph to be inferred.
+ +ontopy/factpluspluswrapper/factppgraph.py
class FaCTPPGraph:
+ """Class for running the FaCT++ reasoner (using OwlApiInterface) and
+ postprocessing the resulting inferred ontology.
+
+ Parameters
+ ----------
+ graph : owlapi.Graph instance
+ The graph to be inferred.
+ """
+
+ def __init__(self, graph):
+ self.graph = graph
+ self._inferred = None
+ self._namespaces = None
+ self._base_iri = None
+
+ @property
+ def inferred(self):
+ """The current inferred graph."""
+ if self._inferred is None:
+ self._inferred = self.raw_inferred_graph()
+ return self._inferred
+
+ @property
+ def base_iri(self):
+ """Base iri of inferred ontology."""
+ if self._base_iri is None:
+ self._base_iri = URIRef(self.asserted_base_iri() + "-inferred")
+ return self._base_iri
+
+ @base_iri.setter
+ def base_iri(self, value):
+ """Assign inferred base iri."""
+ self._base_iri = URIRef(value)
+
+ @property
+ def namespaces(self):
+ """Namespaces defined in the original graph."""
+ if self._namespaces is None:
+ self._namespaces = dict(self.graph.namespaces()).copy()
+ self._namespaces[""] = self.base_iri
+ return self._namespaces
+
+ def asserted_base_iri(self):
+        """Returns the base iri of the original graph."""
+ return URIRef(dict(self.graph.namespaces()).get("", "").rstrip("#/"))
+
+ def raw_inferred_graph(self):
+ """Returns the raw non-postprocessed inferred ontology as a rdflib
+ graph."""
+ return OwlApiInterface().reason(self.graph)
+
+ def inferred_graph(self):
+ """Returns the postprocessed inferred graph."""
+ self.add_base_annotations()
+ self.set_namespace()
+ self.clean_base()
+ self.remove_nothing_is_nothing()
+ self.clean_ancestors()
+ return self.inferred
+
+ def add_base_annotations(self):
+ """Copy base annotations from original graph to the inferred graph."""
+ base = self.base_iri
+ inferred = self.inferred
+ for _, predicate, obj in self.graph.triples(
+ (self.asserted_base_iri(), None, None)
+ ):
+ if predicate == OWL.versionIRI:
+ version = obj.rsplit("/", 1)[-1]
+ obj = URIRef(f"{base}/{version}")
+ inferred.add((base, predicate, obj))
+
+ def set_namespace(self):
+ """Override namespace of inferred graph with the namespace of the
+ original graph.
+ """
+ inferred = self.inferred
+ for key, value in self.namespaces.items():
+ inferred.namespace_manager.bind(
+ key, value, override=True, replace=True
+ )
+
+ def clean_base(self):
+ """Remove all relations `s? a owl:Ontology` where `s?` is not
+ `base_iri`.
+ """
+ inferred = self.inferred
+ for (
+ subject,
+ predicate,
+ obj,
+ ) in inferred.triples( # pylint: disable=not-an-iterable
+ (None, RDF.type, OWL.Ontology)
+ ):
+ inferred.remove((subject, predicate, obj))
+ inferred.add((self.base_iri, RDF.type, OWL.Ontology))
+
+ def remove_nothing_is_nothing(self):
+        """Remove superfluous relation in inferred graph:
+
+ owl:Nothing rdfs:subClassOf owl:Nothing
+ """
+ triple = OWL.Nothing, RDFS.subClassOf, OWL.Nothing
+ inferred = self.inferred
+ if triple in inferred:
+ inferred.remove(triple)
+
+ def clean_ancestors(self):
+ """Remove redundant rdfs:subClassOf relations in inferred graph."""
+ inferred = self.inferred
+ for ( # pylint: disable=too-many-nested-blocks
+ subject
+ ) in inferred.subjects(RDF.type, OWL.Class):
+ if isinstance(subject, URIRef):
+ parents = set(
+ parent
+ for parent in inferred.objects(subject, RDFS.subClassOf)
+ if isinstance(parent, URIRef)
+ )
+ if len(parents) > 1:
+ for parent in parents:
+ ancestors = set(
+ inferred.transitive_objects(parent, RDFS.subClassOf)
+ )
+ for entity in parents:
+ if entity != parent and entity in ancestors:
+ triple = subject, RDFS.subClassOf, entity
+ if triple in inferred:
+ inferred.remove(triple)
+
base_iri
+
+
+ property
+ writable
+
+
+¶Base iri of inferred ontology.
+inferred
+
+
+ property
+ readonly
+
+
+¶The current inferred graph.
+namespaces
+
+
+ property
+ readonly
+
+
+¶Namespaces defined in the original graph.
+add_base_annotations(self)
+
+
+¶Copy base annotations from original graph to the inferred graph.
+ +ontopy/factpluspluswrapper/factppgraph.py
def add_base_annotations(self):
+ """Copy base annotations from original graph to the inferred graph."""
+ base = self.base_iri
+ inferred = self.inferred
+ for _, predicate, obj in self.graph.triples(
+ (self.asserted_base_iri(), None, None)
+ ):
+ if predicate == OWL.versionIRI:
+ version = obj.rsplit("/", 1)[-1]
+ obj = URIRef(f"{base}/{version}")
+ inferred.add((base, predicate, obj))
+
asserted_base_iri(self)
+
+
+¶Returns the base iri of the original graph.
+ +ontopy/factpluspluswrapper/factppgraph.py
def asserted_base_iri(self):
+    """Returns the base iri of the original graph."""
+ return URIRef(dict(self.graph.namespaces()).get("", "").rstrip("#/"))
+
clean_ancestors(self)
+
+
+¶Remove redundant rdfs:subClassOf relations in inferred graph.
+ +ontopy/factpluspluswrapper/factppgraph.py
def clean_ancestors(self):
+ """Remove redundant rdfs:subClassOf relations in inferred graph."""
+ inferred = self.inferred
+ for ( # pylint: disable=too-many-nested-blocks
+ subject
+ ) in inferred.subjects(RDF.type, OWL.Class):
+ if isinstance(subject, URIRef):
+ parents = set(
+ parent
+ for parent in inferred.objects(subject, RDFS.subClassOf)
+ if isinstance(parent, URIRef)
+ )
+ if len(parents) > 1:
+ for parent in parents:
+ ancestors = set(
+ inferred.transitive_objects(parent, RDFS.subClassOf)
+ )
+ for entity in parents:
+ if entity != parent and entity in ancestors:
+ triple = subject, RDFS.subClassOf, entity
+ if triple in inferred:
+ inferred.remove(triple)
+
clean_base(self)
+
+
+¶Remove all relations s? a owl:Ontology
where s?
is not
+base_iri
.
ontopy/factpluspluswrapper/factppgraph.py
def clean_base(self):
+ """Remove all relations `s? a owl:Ontology` where `s?` is not
+ `base_iri`.
+ """
+ inferred = self.inferred
+ for (
+ subject,
+ predicate,
+ obj,
+ ) in inferred.triples( # pylint: disable=not-an-iterable
+ (None, RDF.type, OWL.Ontology)
+ ):
+ inferred.remove((subject, predicate, obj))
+ inferred.add((self.base_iri, RDF.type, OWL.Ontology))
+
inferred_graph(self)
+
+
+¶Returns the postprocessed inferred graph.
+ +ontopy/factpluspluswrapper/factppgraph.py
def inferred_graph(self):
+ """Returns the postprocessed inferred graph."""
+ self.add_base_annotations()
+ self.set_namespace()
+ self.clean_base()
+ self.remove_nothing_is_nothing()
+ self.clean_ancestors()
+ return self.inferred
+
raw_inferred_graph(self)
+
+
+¶Returns the raw non-postprocessed inferred ontology as a rdflib +graph.
+ +ontopy/factpluspluswrapper/factppgraph.py
def raw_inferred_graph(self):
+ """Returns the raw non-postprocessed inferred ontology as a rdflib
+ graph."""
+ return OwlApiInterface().reason(self.graph)
+
remove_nothing_is_nothing(self)
+
+
+¶Remove superfluous relation in inferred graph:
+owl:Nothing rdfs:subClassOf owl:Nothing
+ +ontopy/factpluspluswrapper/factppgraph.py
def remove_nothing_is_nothing(self):
+    """Remove superfluous relation in inferred graph:
+
+ owl:Nothing rdfs:subClassOf owl:Nothing
+ """
+ triple = OWL.Nothing, RDFS.subClassOf, OWL.Nothing
+ inferred = self.inferred
+ if triple in inferred:
+ inferred.remove(triple)
+
set_namespace(self)
+
+
+¶Override namespace of inferred graph with the namespace of the +original graph.
+ +ontopy/factpluspluswrapper/factppgraph.py
def set_namespace(self):
+ """Override namespace of inferred graph with the namespace of the
+ original graph.
+ """
+ inferred = self.inferred
+ for key, value in self.namespaces.items():
+ inferred.namespace_manager.bind(
+ key, value, override=True, replace=True
+ )
+
+FactPPError
+
+
+
+¶Postprocessing error after reasoning with FaCT++.
+ +ontopy/factpluspluswrapper/factppgraph.py
class FactPPError:
+ """Postprocessing error after reasoning with FaCT++."""
+
Python interface to the FaCT++ Reasoner.
+This module is copied from the SimPhoNy project.
+Original author: Matthias Urban
+ + + +
+OwlApiInterface
+
+
+
+¶Interface to the FaCT++ reasoner via OWLAPI.
+ +ontopy/factpluspluswrapper/owlapi_interface.py
class OwlApiInterface:
+ """Interface to the FaCT++ reasoner via OWLAPI."""
+
+ def __init__(self):
+ """Initialize the interface."""
+
+ def reason(self, graph):
+ """Generate the inferred axioms for a given Graph.
+
+ Args:
+ graph (Graph): An rdflib graph to execute the reasoner on.
+
+ """
+ with tempfile.NamedTemporaryFile("wt") as tmpdir:
+ graph.serialize(tmpdir.name, format="xml")
+ return self._run(tmpdir.name, command="--run-reasoner")
+
+ def reason_files(self, *owl_files):
+ """Merge the given owl and generate the inferred axioms.
+
+ Args:
+            *owl_files (os.path): The owl files to merge.
+
+ """
+ return self._run(*owl_files, command="--run-reasoner")
+
+ def merge_files(self, *owl_files):
+ """Merge the given owl files and its import closure.
+
+ Args:
+            *owl_files (os.path): The owl files to merge.
+
+ """
+ return self._run(*owl_files, command="--merge-only")
+
+ @staticmethod
+ def _run(
+ *owl_files, command, output_file=None, return_graph=True
+ ) -> rdflib.Graph:
+ """Run the FaCT++ reasoner using a java command.
+
+ Args:
+ *owl_files (str): Path to the owl files to load.
+ command (str): Either --run-reasoner or --merge-only
+ output_file (str, optional): Where the output should be stored.
+ Defaults to None.
+ return_graph (bool, optional): Whether the result should be parsed
+ and returned. Defaults to True.
+
+ Returns:
+ The reasoned result.
+
+ """
+ java_base = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), "java")
+ )
+ cmd = (
+ [
+ "java",
+ "-cp",
+ java_base + "/lib/jars/*",
+ "-Djava.library.path=" + java_base + "/lib/so",
+ "org.simphony.OntologyLoader",
+ ]
+ + [command]
+ + list(owl_files)
+ )
+ logger.info("Running Reasoner")
+ logger.debug("Command %s", cmd)
+ subprocess.run(cmd, check=True) # nosec
+
+ graph = None
+ if return_graph:
+ graph = rdflib.Graph()
+ graph.parse(RESULT_FILE)
+ if output_file:
+ os.rename(RESULT_FILE, output_file)
+ else:
+ os.remove(RESULT_FILE)
+ return graph
+
__init__(self)
+
+
+ special
+
+
+¶Initialize the interface.
+ +ontopy/factpluspluswrapper/owlapi_interface.py
def __init__(self):
+ """Initialize the interface."""
+
merge_files(self, *owl_files)
+
+
+¶Merge the given owl files and its import closure.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
*owl_files |
+ os.path |
+      The owl files to merge. |
+ () |
+
ontopy/factpluspluswrapper/owlapi_interface.py
def merge_files(self, *owl_files):
+ """Merge the given owl files and its import closure.
+
+ Args:
+        *owl_files (os.path): The owl files to merge.
+
+ """
+ return self._run(*owl_files, command="--merge-only")
+
reason(self, graph)
+
+
+¶Generate the inferred axioms for a given Graph.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
graph |
+ Graph |
+ An rdflib graph to execute the reasoner on. |
+ required | +
ontopy/factpluspluswrapper/owlapi_interface.py
def reason(self, graph):
+ """Generate the inferred axioms for a given Graph.
+
+ Args:
+ graph (Graph): An rdflib graph to execute the reasoner on.
+
+ """
+ with tempfile.NamedTemporaryFile("wt") as tmpdir:
+ graph.serialize(tmpdir.name, format="xml")
+ return self._run(tmpdir.name, command="--run-reasoner")
+
reason_files(self, *owl_files)
+
+
+¶Merge the given owl and generate the inferred axioms.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
*owl_files |
+ os.path |
+      The owl files to merge. |
+ () |
+
ontopy/factpluspluswrapper/owlapi_interface.py
def reason_files(self, *owl_files):
+ """Merge the given owl and generate the inferred axioms.
+
+ Args:
+        *owl_files (os.path): The owl files to merge.
+
+ """
+ return self._run(*owl_files, command="--run-reasoner")
+
reason_from_terminal()
+
+
+¶Run the reasoner from terminal.
+ +ontopy/factpluspluswrapper/owlapi_interface.py
def reason_from_terminal():
+ """Run the reasoner from terminal."""
+ parser = argparse.ArgumentParser(
+ description="Run the FaCT++ reasoner on the given OWL file. "
+ "Catalog files are used to load the import closure. "
+ "Then the reasoner is executed and the inferred triples are merged "
+ "with the asserted ones. If multiple OWL files are given, they are "
+ "merged beforehand"
+ )
+ parser.add_argument(
+ "owl_file", nargs="+", help="OWL file(s) to run the reasoner on."
+ )
+ parser.add_argument("output_file", help="Path to store inferred axioms to.")
+
+ args = parser.parse_args()
+ OwlApiInterface()._run( # pylint: disable=protected-access
+ *args.owl_file,
+ command="--run-reasoner",
+ return_graph=False,
+ output_file=args.output_file,
+ )
+
ontopy.factpluspluswrapper.syncfatpp
¶sync_reasoner_factpp(ontology_or_world=None, infer_property_values=False, debug=1)
+
+
+¶Run FaCT++ reasoner and load the inferred relations back into +the owlready2 triplestore.
+ontology_or_world : None | Ontology instance | World instance | list + Identifies the world to run the reasoner over. +infer_property_values : bool + Whether to also infer property values. +debug : bool + Whether to print debug info to standard output.
+ +ontopy/factpluspluswrapper/sync_factpp.py
def sync_reasoner_factpp(
+ ontology_or_world=None, infer_property_values=False, debug=1
+):
+ """Run FaCT++ reasoner and load the inferred relations back into
+ the owlready2 triplestore.
+
+ Parameters
+ ----------
+ ontology_or_world : None | Ontology instance | World instance | list
+ Identifies the world to run the reasoner over.
+ infer_property_values : bool
+ Whether to also infer property values.
+ debug : bool
+ Whether to print debug info to standard output.
+ """
+ # pylint: disable=too-many-locals,too-many-branches,too-many-statements
+ if isinstance(ontology_or_world, World):
+ world = ontology_or_world
+ elif isinstance(ontology_or_world, Ontology):
+ world = ontology_or_world.world
+ elif isinstance(ontology_or_world, Sequence):
+ world = ontology_or_world[0].world
+ else:
+ world = owlready2.default_world
+
+ if isinstance(ontology_or_world, Ontology):
+ ontology = ontology_or_world
+ elif CURRENT_NAMESPACES.get():
+ ontology = CURRENT_NAMESPACES.get()[-1].ontology
+ else:
+ ontology = world.get_ontology(_INFERRENCES_ONTOLOGY)
+
+ locked = world.graph.has_write_lock()
+ if locked:
+ world.graph.release_write_lock() # Not needed during reasoning
+
+ try:
+ if debug:
+ print("*** Prepare graph")
+ # Exclude owl:imports because they are not needed and can
+ # cause trouble when loading the inferred ontology
+ graph1 = rdflib.Graph()
+ for subject, predicate, obj in world.as_rdflib_graph().triples(
+ (None, None, None)
+ ):
+ if predicate != OWL.imports:
+ graph1.add((subject, predicate, obj))
+
+ if debug:
+ print("*** Run FaCT++ reasoner (and postprocess)")
+ graph2 = FaCTPPGraph(graph1).inferred_graph()
+
+ if debug:
+ print("*** Load inferred ontology")
+ # Check all rdfs:subClassOf relations in the inferred graph and add
+ # them to the world if they are missing
+ new_parents = defaultdict(list)
+ new_equivs = defaultdict(list)
+ entity_2_type = {}
+
+ for (
+ subject,
+ predicate,
+ obj,
+ ) in graph2.triples( # pylint: disable=not-an-iterable
+ (None, None, None)
+ ):
+ if (
+ isinstance(subject, URIRef)
+ and predicate in OWL_2_TYPE
+ and isinstance(obj, URIRef)
+ ):
+ s_storid = ontology._abbreviate(str(subject), False)
+ p_storid = ontology._abbreviate(str(predicate), False)
+ o_storid = ontology._abbreviate(str(obj), False)
+ if (
+ s_storid is not None
+ and p_storid is not None
+ and o_storid is not None
+ ):
+ if predicate in (
+ RDFS.subClassOf,
+ RDFS.subPropertyOf,
+ RDF.type,
+ ):
+ new_parents[s_storid].append(o_storid)
+ entity_2_type[s_storid] = OWL_2_TYPE[predicate]
+ else:
+ new_equivs[s_storid].append(o_storid)
+ entity_2_type[s_storid] = OWL_2_TYPE[predicate]
+
+ if infer_property_values:
+ inferred_obj_relations = []
+ # Hmm, does FaCT++ infer any property values?
+ # If not, remove the `infer_property_values` keyword argument.
+ raise NotImplementedError
+
+ finally:
+ if locked:
+ world.graph.acquire_write_lock() # re-lock when applying results
+
+ if debug:
+ print("*** Applying reasoning results")
+
+ _apply_reasoning_results(
+ world, ontology, debug, new_parents, new_equivs, entity_2_type
+ )
+ if infer_property_values:
+ _apply_inferred_obj_relations(
+ world, ontology, debug, inferred_obj_relations
+ )
+
A module for visualising ontologies using graphviz.
+ + + +
+OntoGraph
+
+
+
+¶Class for visualising an ontology.
+ontology : ontopy.Ontology instance
+ Ontology to visualize.
+root : None | graph.ALL | string | owlready2.ThingClass instance
+ Name or owlready2 entity of root node to plot subgraph
+ below. If root
is graph.ALL
, all classes will be included
+ in the subgraph.
+leaves : None | sequence
+ A sequence of leaf node names for generating sub-graphs.
+entities : None | sequence
+ A sequence of entities to add to the graph.
+relations : "all" | str | None | sequence
+ Sequence of relations to visualise. If "all", means to include
+ all relations.
+style : None | dict | "default"
+ A dict mapping the name of the different graphical elements
+ to dicts of dot graph attributes. Supported graphical elements
+ include:
+ - graphtype : "Digraph" | "Graph"
+ - graph : graph attributes (G)
+ - class : nodes for classes (N)
+ - root : additional attributes for root nodes (N)
+ - leaf : additional attributes for leaf nodes (N)
+ - defined_class : nodes for defined classes (N)
+ - class_construct : nodes for class constructs (N)
+ - individual : nodes for individuals (N)
+ - object_property : nodes for object properties (N)
+ - data_property : nodes for data properties (N)
+ - annotation_property : nodes for annotation properties (N)
+ - added_node : nodes added because addnodes
is true (N)
+ - isA : edges for isA relations (E)
+ - not : edges for not class constructs (E)
+ - equivalent_to : edges for equivalent_to relations (E)
+ - disjoint_with : edges for disjoint_with relations (E)
+ - inverse_of : edges for inverse_of relations (E)
+ - default_relation : default edges relations and restrictions (E)
+ - relations : dict of styles for different relations (E)
+ - inverse : default edges for inverse relations (E)
+ - default_dataprop : default edges for data properties (E)
+ - nodes : attribute for individual nodes (N)
+ - edges : attribute for individual edges (E)
+ If style is None or "default", the default style is used.
+ See https://www.graphviz.org/doc/info/attrs.html
+edgelabels : None | bool | dict
+ Whether to add labels to the edges of the generated graph.
+ It is also possible to provide a dict mapping the
+ full labels (with cardinality stripped off for restrictions)
+ to some abbreviations.
+addnodes : bool
+ Whether to add missing target nodes in relations.
+addconstructs : bool
+ Whether to add nodes representing class constructs.
+included_namespaces : sequence
+ In combination with root
, only include classes with one of
+ the listed namespaces. If empty (the default), nothing is
+ excluded.
+included_ontologies : sequence
+ In combination with root
, only include classes defined in
+ one of the listed ontologies. If empty (default), nothing is
+ excluded.
+parents : int
+ Include parents
levels of parents.
+excluded_nodes : None | sequence
+ Sequence of labels of nodes to exclude.
+graph : None | pydot.Dot instance
+ Graphviz Digraph object to plot into. If None, a new graph object
+ is created using the keyword arguments.
+imported : bool
+ Whether to include imported classes if entities
is None.
+kwargs :
+ Passed to graphviz.Digraph.
ontopy/graph.py
class OntoGraph: # pylint: disable=too-many-instance-attributes
+ """Class for visualising an ontology.
+
+ Parameters
+ ----------
+ ontology : ontopy.Ontology instance
+ Ontology to visualize.
+ root : None | graph.ALL | string | owlready2.ThingClass instance
+ Name or owlready2 entity of root node to plot subgraph
+ below. If `root` is `graph.ALL`, all classes will be included
+ in the subgraph.
+ leaves : None | sequence
+ A sequence of leaf node names for generating sub-graphs.
+ entities : None | sequence
+ A sequence of entities to add to the graph.
+ relations : "all" | str | None | sequence
+ Sequence of relations to visualise. If "all", means to include
+ all relations.
+ style : None | dict | "default"
+ A dict mapping the name of the different graphical elements
+ to dicts of dot graph attributes. Supported graphical elements
+ include:
+ - graphtype : "Digraph" | "Graph"
+ - graph : graph attributes (G)
+ - class : nodes for classes (N)
+ - root : additional attributes for root nodes (N)
+ - leaf : additional attributes for leaf nodes (N)
+ - defined_class : nodes for defined classes (N)
+ - class_construct : nodes for class constructs (N)
+ - individual : nodes for individuals (N)
+ - object_property : nodes for object properties (N)
+ - data_property : nodes for data properties (N)
+ - annotation_property : nodes for annotation properties (N)
+ - added_node : nodes added because `addnodes` is true (N)
+ - isA : edges for isA relations (E)
+ - not : edges for not class constructs (E)
+ - equivalent_to : edges for equivalent_to relations (E)
+ - disjoint_with : edges for disjoint_with relations (E)
+ - inverse_of : edges for inverse_of relations (E)
+ - default_relation : default edges relations and restrictions (E)
+ - relations : dict of styles for different relations (E)
+ - inverse : default edges for inverse relations (E)
+ - default_dataprop : default edges for data properties (E)
+ - nodes : attribute for individual nodes (N)
+ - edges : attribute for individual edges (E)
+ If style is None or "default", the default style is used.
+ See https://www.graphviz.org/doc/info/attrs.html
+ edgelabels : None | bool | dict
+ Whether to add labels to the edges of the generated graph.
+ It is also possible to provide a dict mapping the
+ full labels (with cardinality stripped off for restrictions)
+ to some abbreviations.
+ addnodes : bool
+ Whether to add missing target nodes in relations.
+ addconstructs : bool
+ Whether to add nodes representing class constructs.
+ included_namespaces : sequence
+ In combination with `root`, only include classes with one of
+ the listed namespaces. If empty (the default), nothing is
+ excluded.
+ included_ontologies : sequence
+ In combination with `root`, only include classes defined in
+ one of the listed ontologies. If empty (default), nothing is
+ excluded.
+ parents : int
+ Include `parents` levels of parents.
+ excluded_nodes : None | sequence
+ Sequence of labels of nodes to exclude.
+ graph : None | pydot.Dot instance
+ Graphviz Digraph object to plot into. If None, a new graph object
+ is created using the keyword arguments.
+ imported : bool
+ Whether to include imported classes if `entities` is None.
+ kwargs :
+ Passed to graphviz.Digraph.
+ """
+
+ def __init__( # pylint: disable=too-many-arguments,too-many-locals
+ self,
+ ontology,
+ root=None,
+ leaves=None,
+ entities=None,
+ relations="isA",
+ style=None,
+ edgelabels=None,
+ addnodes=False,
+ addconstructs=False,
+ included_namespaces=(),
+ included_ontologies=(),
+ parents=0,
+ excluded_nodes=None,
+ graph=None,
+ imported=False,
+ **kwargs,
+ ):
+ if style is None or style == "default":
+ style = _default_style
+
+ if graph is None:
+ graphtype = style.get("graphtype", "Digraph")
+ dotcls = getattr(graphviz, graphtype)
+ graph_attr = kwargs.pop("graph_attr", {})
+ for key, value in style.get("graph", {}).items():
+ graph_attr.setdefault(key, value)
+ self.dot = dotcls(graph_attr=graph_attr, **kwargs)
+ self.nodes = set()
+ self.edges = set()
+ else:
+ if ontology != graph.ontology:
+ raise ValueError(
+ "the same ontology must be used when extending a graph"
+ )
+ self.dot = graph.dot.copy()
+ self.nodes = graph.nodes.copy()
+ self.edges = graph.edges.copy()
+
+ self.ontology = ontology
+ self.relations = set(
+ [relations] if isinstance(relations, str) else relations
+ )
+ self.style = style
+ self.edgelabels = edgelabels
+ self.addnodes = addnodes
+ self.addconstructs = addconstructs
+ self.excluded_nodes = set(excluded_nodes) if excluded_nodes else set()
+ self.imported = imported
+
+ if root == ALL:
+ self.add_entities(
+ relations=relations,
+ edgelabels=edgelabels,
+ addnodes=addnodes,
+ addconstructs=addconstructs,
+ )
+ elif root:
+ self.add_branch(
+ root,
+ leaves,
+ relations=relations,
+ edgelabels=edgelabels,
+ addnodes=addnodes,
+ addconstructs=addconstructs,
+ included_namespaces=included_namespaces,
+ included_ontologies=included_ontologies,
+ )
+ if parents:
+ self.add_parents(
+ root,
+ levels=parents,
+ relations=relations,
+ edgelabels=edgelabels,
+ addnodes=addnodes,
+ addconstructs=addconstructs,
+ )
+
+ if entities:
+ self.add_entities(
+ entities=entities,
+ relations=relations,
+ edgelabels=edgelabels,
+ addnodes=addnodes,
+ addconstructs=addconstructs,
+ )
+
+ def add_entities( # pylint: disable=too-many-arguments
+ self,
+ entities=None,
+ relations="isA",
+ edgelabels=None,
+ addnodes=False,
+ addconstructs=False,
+ nodeattrs=None,
+ **attrs,
+ ):
+ """Adds a sequence of entities to the graph. If `entities` is None,
+ all classes are added to the graph.
+
+ `nodeattrs` is a dict mapping node names to attributes for
+ dedicated nodes.
+ """
+ if entities is None:
+ entities = self.ontology.classes(imported=self.imported)
+ self.add_nodes(entities, nodeattrs=nodeattrs, **attrs)
+ self.add_edges(
+ relations=relations,
+ edgelabels=edgelabels,
+ addnodes=addnodes,
+ addconstructs=addconstructs,
+ **attrs,
+ )
+
+ def add_branch( # pylint: disable=too-many-arguments,too-many-locals
+ self,
+ root,
+ leaves=None,
+ include_leaves=True,
+ strict_leaves=False,
+ exclude=None,
+ relations="isA",
+ edgelabels=None,
+ addnodes=False,
+ addconstructs=False,
+ included_namespaces=(),
+ included_ontologies=(),
+ include_parents="closest",
+ **attrs,
+ ):
+ """Adds branch under `root` ending at any entity included in the
+ sequence `leaves`. If `include_leaves` is true, leaf classes are
+ also included."""
+ if leaves is None:
+ leaves = ()
+ classes = self.ontology.get_branch(
+ root=root,
+ leaves=leaves,
+ include_leaves=include_leaves,
+ strict_leaves=strict_leaves,
+ exclude=exclude,
+ )
+
+ classes = filter_classes(
+ classes,
+ included_namespaces=included_namespaces,
+ included_ontologies=included_ontologies,
+ )
+
+ nodeattrs = {}
+ nodeattrs[get_label(root)] = self.style.get("root", {})
+ for leaf in leaves:
+ nodeattrs[get_label(leaf)] = self.style.get("leaf", {})
+
+ self.add_entities(
+ entities=classes,
+ relations=relations,
+ edgelabels=edgelabels,
+ addnodes=addnodes,
+ addconstructs=addconstructs,
+ nodeattrs=nodeattrs,
+ **attrs,
+ )
+ closest_ancestors = False
+ ancestor_generations = None
+ if include_parents == "closest":
+ closest_ancestors = True
+ elif isinstance(include_parents, int):
+ ancestor_generations = include_parents
+ parents = self.ontology.get_ancestors(
+ classes,
+ closest=closest_ancestors,
+ generations=ancestor_generations,
+ strict=True,
+ )
+ if parents:
+ for parent in parents:
+ nodeattrs[get_label(parent)] = self.style.get("parent_node", {})
+ self.add_entities(
+ entities=parents,
+ relations=relations,
+ edgelabels=edgelabels,
+ addnodes=addnodes,
+ addconstructs=addconstructs,
+ nodeattrs=nodeattrs,
+ **attrs,
+ )
+
+ def add_parents( # pylint: disable=too-many-arguments
+ self,
+ name,
+ levels=1,
+ relations="isA",
+ edgelabels=None,
+ addnodes=False,
+ addconstructs=False,
+ **attrs,
+ ):
+ """Add `levels` levels of strict parents of entity `name`."""
+
+ def addparents(entity, nodes, parents):
+ if nodes > 0:
+ for parent in entity.get_parents(strict=True):
+ parents.add(parent)
+ addparents(parent, nodes - 1, parents)
+
+ entity = self.ontology[name] if isinstance(name, str) else name
+ parents = set()
+ addparents(entity, levels, parents)
+ self.add_entities(
+ entities=parents,
+ relations=relations,
+ edgelabels=edgelabels,
+ addnodes=addnodes,
+ addconstructs=addconstructs,
+ **attrs,
+ )
+
+ def add_node(self, name, nodeattrs=None, **attrs):
+ """Add node with given name. `attrs` are graphviz node attributes."""
+ entity = self.ontology[name] if isinstance(name, str) else name
+ label = get_label(entity)
+ if label not in self.nodes.union(self.excluded_nodes):
+ kwargs = self.get_node_attrs(
+ entity, nodeattrs=nodeattrs, attrs=attrs
+ )
+ if hasattr(entity, "iri"):
+ kwargs.setdefault("URL", entity.iri)
+ self.dot.node(label, label=label, **kwargs)
+ self.nodes.add(label)
+
+ def add_nodes(self, names, nodeattrs, **attrs):
+ """Add nodes with given names. `attrs` are graphviz node attributes."""
+ for name in names:
+ self.add_node(name, nodeattrs=nodeattrs, **attrs)
+
+ def add_edge(self, subject, predicate, obj, edgelabel=None, **attrs):
+ """Add edge corresponding for ``(subject, predicate, object)``
+ triplet."""
+ subject = subject if isinstance(subject, str) else get_label(subject)
+ predicate = (
+ predicate if isinstance(predicate, str) else get_label(predicate)
+ )
+ obj = obj if isinstance(obj, str) else get_label(obj)
+ if subject in self.excluded_nodes or obj in self.excluded_nodes:
+ return
+ if not isinstance(subject, str) or not isinstance(obj, str):
+ raise TypeError("`subject` and `object` must be strings")
+ if subject not in self.nodes:
+ raise RuntimeError(f'`subject` "{subject}" must have been added')
+ if obj not in self.nodes:
+ raise RuntimeError(f'`object` "{obj}" must have been added')
+ key = (subject, predicate, obj)
+ if key not in self.edges:
+ relations = self.style.get("relations", {})
+ rels = set(
+ self.ontology[_] for _ in relations if _ in self.ontology
+ )
+ if (edgelabel is None) and (
+ (predicate in rels) or (predicate == "isA")
+ ):
+ edgelabel = self.edgelabels
+ label = None
+ if edgelabel is None:
+ tokens = predicate.split()
+ if len(tokens) == 2 and tokens[1] in ("some", "only"):
+ label = f"{tokens[0]} {tokens[1]}"
+ elif len(tokens) == 3 and tokens[1] in (
+ "exactly",
+ "min",
+ "max",
+ ):
+ label = f"{tokens[0]} {tokens[1]} {tokens[2]}"
+ elif isinstance(edgelabel, str):
+ label = edgelabel
+ elif isinstance(edgelabel, dict):
+ label = edgelabel.get(predicate, predicate)
+ elif edgelabel:
+ label = predicate
+ kwargs = self.get_edge_attrs(predicate, attrs=attrs)
+ self.dot.edge(subject, obj, label=label, **kwargs)
+ self.edges.add(key)
+
+ def add_source_edges( # pylint: disable=too-many-arguments,too-many-branches
+ self,
+ source,
+ relations=None,
+ edgelabels=None,
+ addnodes=None,
+ addconstructs=None,
+ **attrs,
+ ):
+ """Adds all relations originating from entity `source` whose types
+ are listed in `relations`."""
+ if relations is None:
+ relations = self.relations
+ elif isinstance(relations, str):
+ relations = set([relations])
+ else:
+ relations = set(relations)
+
+ edgelabels = self.edgelabels if edgelabels is None else edgelabels
+ addconstructs = (
+ self.addconstructs if addconstructs is None else addconstructs
+ )
+
+ entity = self.ontology[source] if isinstance(source, str) else source
+ label = get_label(entity)
+ for relation in entity.is_a:
+ # isA
+ if isinstance(
+ relation, (owlready2.ThingClass, owlready2.ObjectPropertyClass)
+ ):
+ if "all" in relations or "isA" in relations:
+ rlabel = get_label(relation)
+ # FIXME - we actually want to include individuals...
+ if isinstance(entity, owlready2.Thing):
+ continue
+ if relation not in entity.get_parents(strict=True):
+ continue
+ if not self.add_missing_node(relation, addnodes=addnodes):
+ continue
+ self.add_edge(
+ subject=label,
+ predicate="isA",
+ obj=rlabel,
+ edgelabel=edgelabels,
+ **attrs,
+ )
+
+ # restriction
+ elif isinstance(relation, owlready2.Restriction):
+ rname = get_label(relation.property)
+ if "all" in relations or rname in relations:
+ rlabel = f"{rname} {typenames[relation.type]}"
+ if isinstance(relation.value, owlready2.ThingClass):
+ obj = get_label(relation.value)
+ if not self.add_missing_node(relation.value, addnodes):
+ continue
+ elif (
+ isinstance(relation.value, owlready2.ClassConstruct)
+ and self.addconstructs
+ ):
+ obj = self.add_class_construct(relation.value)
+ else:
+ continue
+ pred = asstring(
+ relation, exclude_object=True, ontology=self.ontology
+ )
+ self.add_edge(
+ label, pred, obj, edgelabel=edgelabels, **attrs
+ )
+
+ # inverse
+ if isinstance(relation, owlready2.Inverse):
+ if "all" in relations or "inverse" in relations:
+ rlabel = get_label(relation)
+ if not self.add_missing_node(relation, addnodes=addnodes):
+ continue
+ if relation not in entity.get_parents(strict=True):
+ continue
+ self.add_edge(
+ subject=label,
+ predicate="inverse",
+ obj=rlabel,
+ edgelabel=edgelabels,
+ **attrs,
+ )
+
+ def add_edges( # pylint: disable=too-many-arguments
+ self,
+ sources=None,
+ relations=None,
+ edgelabels=None,
+ addnodes=None,
+ addconstructs=None,
+ **attrs,
+ ):
+ """Adds all relations originating from entities `sources` whose types
+ are listed in `relations`. If `sources` is None, edges are added
+ between all current nodes."""
+ if sources is None:
+ sources = self.nodes
+ for source in sources.copy():
+ self.add_source_edges(
+ source,
+ relations=relations,
+ edgelabels=edgelabels,
+ addnodes=addnodes,
+ addconstructs=addconstructs,
+ **attrs,
+ )
+
+ def add_missing_node(self, name, addnodes=None):
+ """Checks if `name` corresponds to a missing node and adds it if
+ `addnodes` is true.
+
+ Returns true if the node exists or is added, false otherwise."""
+ addnodes = self.addnodes if addnodes is None else addnodes
+ entity = self.ontology[name] if isinstance(name, str) else name
+ label = get_label(entity)
+ if label not in self.nodes:
+ if addnodes:
+ self.add_node(entity, **self.style.get("added_node", {}))
+ else:
+ return False
+ return True
+
+ def add_class_construct(self, construct):
+ """Adds class construct and return its label."""
+ self.add_node(construct, **self.style.get("class_construct", {}))
+ label = get_label(construct)
+ if isinstance(construct, owlready2.Or):
+ for cls in construct.Classes:
+ clslabel = get_label(cls)
+ if clslabel not in self.nodes and self.addnodes:
+ self.add_node(cls)
+ if clslabel in self.nodes:
+ self.add_edge(get_label(cls), "isA", label)
+ elif isinstance(construct, owlready2.And):
+ for cls in construct.Classes:
+ clslabel = get_label(cls)
+ if clslabel not in self.nodes and self.addnodes:
+ self.add_node(cls)
+ if clslabel in self.nodes:
+ self.add_edge(label, "isA", get_label(cls))
+ elif isinstance(construct, owlready2.Not):
+ clslabel = get_label(construct.Class)
+ if clslabel not in self.nodes and self.addnodes:
+ self.add_node(construct.Class)
+ if clslabel in self.nodes:
+ self.add_edge(clslabel, "not", label)
+ # Neither and nor inverse constructs are
+ return label
+
+ def get_node_attrs(self, name, nodeattrs, attrs):
+ """Returns attributes for node or edge `name`. `attrs` overrides
+ the default style."""
+ entity = self.ontology[name] if isinstance(name, str) else name
+ label = get_label(entity)
+ # class
+ if isinstance(entity, owlready2.ThingClass):
+ if entity.is_defined:
+ kwargs = self.style.get("defined_class", {})
+ else:
+ kwargs = self.style.get("class", {})
+ # class construct
+ elif isinstance(entity, owlready2.ClassConstruct):
+ kwargs = self.style.get("class_construct", {})
+ # individual
+ elif isinstance(entity, owlready2.Thing):
+ kwargs = self.style.get("individual", {})
+ # object property
+ elif isinstance(entity, owlready2.ObjectPropertyClass):
+ kwargs = self.style.get("object_property", {})
+ # data property
+ elif isinstance(entity, owlready2.DataPropertyClass):
+ kwargs = self.style.get("data_property", {})
+ # annotation property
+ elif isinstance(entity, owlready2.AnnotationPropertyClass):
+ kwargs = self.style.get("annotation_property", {})
+ else:
+ raise TypeError(f"Unknown entity type: {entity!r}")
+ kwargs = kwargs.copy()
+ kwargs.update(self.style.get("nodes", {}).get(label, {}))
+ if nodeattrs:
+ kwargs.update(nodeattrs.get(label, {}))
+ kwargs.update(attrs)
+ return kwargs
+
+ def _relation_styles(
+ self, entity: ThingClass, relations: dict, rels: set
+ ) -> dict:
+ """Helper function that returns the styles of the relations
+ to be used.
+
+ Parameters:
+ entity: the entity of the parent relation
+ relations: relations with default styles
+ rels: relations to be considered that have default styles,
+ either for the prefLabel or one of the altLabels
+ """
+ for relation in entity.mro():
+ if relation in rels:
+ if str(get_label(relation)) in relations:
+ rattrs = relations[str(get_label(relation))]
+ else:
+ for alt_label in relation.get_annotations()["altLabel"]:
+ rattrs = relations[str(alt_label)]
+
+ break
+ else:
+ warnings.warn(
+ f"Style not defined for relation {get_label(entity)}. "
+ "Resorting to default style."
+ )
+ rattrs = self.style.get("default_relation", {})
+ return rattrs
+
+ def get_edge_attrs(self, predicate: str, attrs: dict) -> dict:
+ """Returns attributes for node or edge `predicate`. `attrs` overrides
+ the default style.
+
+ Parameters:
+ predicate: predicate to get attributes for
+ attrs: desired attributes to override default
+ """
+ # given type
+ types = ("isA", "equivalent_to", "disjoint_with", "inverse_of")
+ if predicate in types:
+ kwargs = self.style.get(predicate, {}).copy()
+ else:
+ kwargs = {}
+ name = predicate.split(None, 1)[0]
+ match = re.match(r"Inverse\((.*)\)", name)
+ if match:
+ (name,) = match.groups()
+ attrs = attrs.copy()
+ for key, value in self.style.get("inverse", {}).items():
+ attrs.setdefault(key, value)
+ if not isinstance(name, str) or name in self.ontology:
+ entity = self.ontology[name] if isinstance(name, str) else name
+ relations = self.style.get("relations", {})
+ rels = set(
+ self.ontology[_] for _ in relations if _ in self.ontology
+ )
+ rattrs = self._relation_styles(entity, relations, rels)
+
+ # object property
+ if isinstance(
+ entity,
+ (owlready2.ObjectPropertyClass, owlready2.ObjectProperty),
+ ):
+ kwargs = self.style.get("default_relation", {}).copy()
+ kwargs.update(rattrs)
+ # data property
+ elif isinstance(
+ entity,
+ (owlready2.DataPropertyClass, owlready2.DataProperty),
+ ):
+ kwargs = self.style.get("default_dataprop", {}).copy()
+ kwargs.update(rattrs)
+ else:
+ raise TypeError(f"Unknown entity type: {entity!r}")
+ kwargs.update(self.style.get("edges", {}).get(predicate, {}))
+ kwargs.update(attrs)
+ return kwargs
+
+ def add_legend(self, relations=None):
+ """Adds legend for specified relations to the graph.
+
+ If `relations` is "all", the legend will contain all relations
+ that are defined in the style. By default the legend will
+ only contain relations that are currently included in the
+ graph.
+
+ Hence, you usually want to call add_legend() as the last method
+ before saving or displaying.
+
+ Relations with defined style will be bold in legend.
+ Relations that have inherited style from parent relation
+ will not be bold.
+ """
+ rels = self.style.get("relations", {})
+ if relations is None:
+ relations = self.get_relations(sort=True)
+ elif relations == "all":
+ relations = ["isA"] + list(rels.keys()) + ["inverse"]
+ elif isinstance(relations, str):
+ relations = relations.split(",")
+
+ nrelations = len(relations)
+ if nrelations == 0:
+ return
+
+ table = (
+ '<<table border="0" cellpadding="2" cellspacing="0" cellborder="0">'
+ )
+ label1 = [table]
+ label2 = [table]
+ for index, relation in enumerate(relations):
+ if (relation in rels) or (relation == "isA"):
+ label1.append(
+ f'<tr><td align="right" '
+ f'port="i{index}"><b>{relation}</b></td></tr>'
+ )
+ else:
+ label1.append(
+ f'<tr><td align="right" '
+ f'port="i{index}">{relation}</td></tr>'
+ )
+ label2.append(f'<tr><td port="i{index}"> </td></tr>')
+ label1.append("</table>>")
+ label2.append("</table>>")
+ self.dot.node("key1", label="\n".join(label1), shape="plaintext")
+ self.dot.node("key2", label="\n".join(label2), shape="plaintext")
+
+ rankdir = self.dot.graph_attr.get("rankdir", "TB")
+ constraint = "false" if rankdir in ("TB", "BT") else "true"
+ inv = rankdir in ("BT",)
+
+ for index in range(nrelations):
+ relation = (
+ relations[nrelations - 1 - index] if inv else relations[index]
+ )
+ if relation == "inverse":
+ kwargs = self.style.get("inverse", {}).copy()
+ else:
+ kwargs = self.get_edge_attrs(relation, {}).copy()
+ kwargs["constraint"] = constraint
+ with self.dot.subgraph(name=f"sub{index}") as subgraph:
+ subgraph.attr(rank="same")
+ if rankdir in ("BT", "LR"):
+ self.dot.edge(
+ f"key1:i{index}:e", f"key2:i{index}:w", **kwargs
+ )
+ else:
+ self.dot.edge(
+ f"key2:i{index}:w", f"key1:i{index}:e", **kwargs
+ )
+
+ def get_relations(self, sort=True):
+ """Returns a set of relations in current graph. If `sort` is true,
+ a sorted list is returned."""
+ relations = set()
+ for _, predicate, _ in self.edges:
+ if predicate.startswith("Inverse"):
+ relations.add("inverse")
+ match = re.match(r"Inverse\((.+)\)", predicate)
+ if match is None:
+ raise ValueError(
+ "Could unexpectedly not find the inverse relation "
+ f"just added in: {predicate}"
+ )
+ relations.add(match.groups()[0])
+ else:
+ relations.add(predicate.split(None, 1)[0])
+
+ # Sort, but place 'isA' first and 'inverse' last
+ if sort:
+ start, end = [], []
+ if "isA" in relations:
+ relations.remove("isA")
+ start.append("isA")
+ if "inverse" in relations:
+ relations.remove("inverse")
+ end.append("inverse")
+ relations = start + sorted(relations) + end
+
+ return relations
+
+ def save(self, filename, fmt=None, **kwargs):
+ """Saves graph to `filename`. If format is not given, it is
+ inferred from `filename`."""
+ base = os.path.splitext(filename)[0]
+ fmt = get_format(filename, default="svg", fmt=fmt)
+ kwargs.setdefault("cleanup", True)
+ if fmt in ("graphviz", "gv"):
+ if "dictionary" in kwargs:
+ self.dot.save(filename, dictionary=kwargs["dictionary"])
+ else:
+ self.dot.save(filename)
+ else:
+ fmt = kwargs.pop("format", fmt)
+ self.dot.render(base, format=fmt, **kwargs)
+
+ def view(self):
+ """Shows the graph in a viewer."""
+ self.dot.view(cleanup=True)
+
+ def get_figsize(self):
+ """Returns the default figure size (width, height) in points."""
+ with tempfile.TemporaryDirectory() as tmpdir:
+ tmpfile = os.path.join(tmpdir, "graph.svg")
+ self.save(tmpfile)
+ xml = ET.parse(tmpfile)
+ svg = xml.getroot()
+ width = svg.attrib["width"]
+ height = svg.attrib["height"]
+ if not width.endswith("pt"):
+ # ensure that units are in points
+ raise ValueError(
+ "The width attribute should always be given in 'pt', "
+ f"but it is: {width}"
+ )
+
+ def asfloat(string):
+ return float(re.match(r"^[\d.]+", string).group())
+
+ return asfloat(width), asfloat(height)
+
add_branch(self, root, leaves=None, include_leaves=True, strict_leaves=False, exclude=None, relations='isA', edgelabels=None, addnodes=False, addconstructs=False, included_namespaces=(), included_ontologies=(), include_parents='closest', **attrs)
+
+
+¶Adds branch under root
ending at any entity included in the
+sequence leaves
. If include_leaves
is true, leaf classes are
+also included.
ontopy/graph.py
def add_branch( # pylint: disable=too-many-arguments,too-many-locals
+ self,
+ root,
+ leaves=None,
+ include_leaves=True,
+ strict_leaves=False,
+ exclude=None,
+ relations="isA",
+ edgelabels=None,
+ addnodes=False,
+ addconstructs=False,
+ included_namespaces=(),
+ included_ontologies=(),
+ include_parents="closest",
+ **attrs,
+):
+ """Adds branch under `root` ending at any entity included in the
+ sequence `leaves`. If `include_leaves` is true, leaf classes are
+ also included."""
+ if leaves is None:
+ leaves = ()
+ classes = self.ontology.get_branch(
+ root=root,
+ leaves=leaves,
+ include_leaves=include_leaves,
+ strict_leaves=strict_leaves,
+ exclude=exclude,
+ )
+
+ classes = filter_classes(
+ classes,
+ included_namespaces=included_namespaces,
+ included_ontologies=included_ontologies,
+ )
+
+ nodeattrs = {}
+ nodeattrs[get_label(root)] = self.style.get("root", {})
+ for leaf in leaves:
+ nodeattrs[get_label(leaf)] = self.style.get("leaf", {})
+
+ self.add_entities(
+ entities=classes,
+ relations=relations,
+ edgelabels=edgelabels,
+ addnodes=addnodes,
+ addconstructs=addconstructs,
+ nodeattrs=nodeattrs,
+ **attrs,
+ )
+ closest_ancestors = False
+ ancestor_generations = None
+ if include_parents == "closest":
+ closest_ancestors = True
+ elif isinstance(include_parents, int):
+ ancestor_generations = include_parents
+ parents = self.ontology.get_ancestors(
+ classes,
+ closest=closest_ancestors,
+ generations=ancestor_generations,
+ strict=True,
+ )
+ if parents:
+ for parent in parents:
+ nodeattrs[get_label(parent)] = self.style.get("parent_node", {})
+ self.add_entities(
+ entities=parents,
+ relations=relations,
+ edgelabels=edgelabels,
+ addnodes=addnodes,
+ addconstructs=addconstructs,
+ nodeattrs=nodeattrs,
+ **attrs,
+ )
+
add_class_construct(self, construct)
+
+
+¶Adds class construct and return its label.
+ +ontopy/graph.py
def add_class_construct(self, construct):
+ """Adds class construct and return its label."""
+ self.add_node(construct, **self.style.get("class_construct", {}))
+ label = get_label(construct)
+ if isinstance(construct, owlready2.Or):
+ for cls in construct.Classes:
+ clslabel = get_label(cls)
+ if clslabel not in self.nodes and self.addnodes:
+ self.add_node(cls)
+ if clslabel in self.nodes:
+ self.add_edge(get_label(cls), "isA", label)
+ elif isinstance(construct, owlready2.And):
+ for cls in construct.Classes:
+ clslabel = get_label(cls)
+ if clslabel not in self.nodes and self.addnodes:
+ self.add_node(cls)
+ if clslabel in self.nodes:
+ self.add_edge(label, "isA", get_label(cls))
+ elif isinstance(construct, owlready2.Not):
+ clslabel = get_label(construct.Class)
+ if clslabel not in self.nodes and self.addnodes:
+ self.add_node(construct.Class)
+ if clslabel in self.nodes:
+ self.add_edge(clslabel, "not", label)
+ # Neither and nor inverse constructs are
+ return label
+
add_edge(self, subject, predicate, obj, edgelabel=None, **attrs)
+
+
+¶Add edge corresponding to (subject, predicate, object)
+triplet.
ontopy/graph.py
def add_edge(self, subject, predicate, obj, edgelabel=None, **attrs):
+ """Add edge corresponding for ``(subject, predicate, object)``
+ triplet."""
+ subject = subject if isinstance(subject, str) else get_label(subject)
+ predicate = (
+ predicate if isinstance(predicate, str) else get_label(predicate)
+ )
+ obj = obj if isinstance(obj, str) else get_label(obj)
+ if subject in self.excluded_nodes or obj in self.excluded_nodes:
+ return
+ if not isinstance(subject, str) or not isinstance(obj, str):
+ raise TypeError("`subject` and `object` must be strings")
+ if subject not in self.nodes:
+ raise RuntimeError(f'`subject` "{subject}" must have been added')
+ if obj not in self.nodes:
+ raise RuntimeError(f'`object` "{obj}" must have been added')
+ key = (subject, predicate, obj)
+ if key not in self.edges:
+ relations = self.style.get("relations", {})
+ rels = set(
+ self.ontology[_] for _ in relations if _ in self.ontology
+ )
+ if (edgelabel is None) and (
+ (predicate in rels) or (predicate == "isA")
+ ):
+ edgelabel = self.edgelabels
+ label = None
+ if edgelabel is None:
+ tokens = predicate.split()
+ if len(tokens) == 2 and tokens[1] in ("some", "only"):
+ label = f"{tokens[0]} {tokens[1]}"
+ elif len(tokens) == 3 and tokens[1] in (
+ "exactly",
+ "min",
+ "max",
+ ):
+ label = f"{tokens[0]} {tokens[1]} {tokens[2]}"
+ elif isinstance(edgelabel, str):
+ label = edgelabel
+ elif isinstance(edgelabel, dict):
+ label = edgelabel.get(predicate, predicate)
+ elif edgelabel:
+ label = predicate
+ kwargs = self.get_edge_attrs(predicate, attrs=attrs)
+ self.dot.edge(subject, obj, label=label, **kwargs)
+ self.edges.add(key)
+
add_edges(self, sources=None, relations=None, edgelabels=None, addnodes=None, addconstructs=None, **attrs)
+
+
+¶Adds all relations originating from entities sources
whose type
+are listed in relations
. If sources
is None, edges are added
+between all current nodes.
ontopy/graph.py
def add_edges( # pylint: disable=too-many-arguments
+ self,
+ sources=None,
+ relations=None,
+ edgelabels=None,
+ addnodes=None,
+ addconstructs=None,
+ **attrs,
+):
+ """Adds all relations originating from entities `sources` who's type
+ are listed in `relations`. If `sources` is None, edges are added
+ between all current nodes."""
+ if sources is None:
+ sources = self.nodes
+ for source in sources.copy():
+ self.add_source_edges(
+ source,
+ relations=relations,
+ edgelabels=edgelabels,
+ addnodes=addnodes,
+ addconstructs=addconstructs,
+ **attrs,
+ )
+
add_entities(self, entities=None, relations='isA', edgelabels=None, addnodes=False, addconstructs=False, nodeattrs=None, **attrs)
+
+
+¶Adds a sequence of entities to the graph. If entities
is None,
+all classes are added to the graph.
nodeattrs
is a dict mapping node names to attributes for
+dedicated nodes.
ontopy/graph.py
def add_entities( # pylint: disable=too-many-arguments
+ self,
+ entities=None,
+ relations="isA",
+ edgelabels=None,
+ addnodes=False,
+ addconstructs=False,
+ nodeattrs=None,
+ **attrs,
+):
+ """Adds a sequence of entities to the graph. If `entities` is None,
+ all classes are added to the graph.
+
+    `nodeattrs` is a dict mapping node names to attributes for
+ dedicated nodes.
+ """
+ if entities is None:
+ entities = self.ontology.classes(imported=self.imported)
+ self.add_nodes(entities, nodeattrs=nodeattrs, **attrs)
+ self.add_edges(
+ relations=relations,
+ edgelabels=edgelabels,
+ addnodes=addnodes,
+ addconstructs=addconstructs,
+ **attrs,
+ )
+
add_legend(self, relations=None)
+
+
+¶Adds legend for specified relations to the graph.
+If relations
is "all", the legend will contain all relations
+that are defined in the style. By default the legend will
+only contain relations that are currently included in the
+graph.
Hence, you usually want to call add_legend() as the last method +before saving or displaying.
+Relations with defined style will be bold in legend. +Relations that have inherited style from parent relation +will not be bold.
+ +ontopy/graph.py
def add_legend(self, relations=None):
+ """Adds legend for specified relations to the graph.
+
+ If `relations` is "all", the legend will contain all relations
+ that are defined in the style. By default the legend will
+ only contain relations that are currently included in the
+ graph.
+
+ Hence, you usually want to call add_legend() as the last method
+ before saving or displaying.
+
+ Relations with defined style will be bold in legend.
+ Relations that have inherited style from parent relation
+ will not be bold.
+ """
+ rels = self.style.get("relations", {})
+ if relations is None:
+ relations = self.get_relations(sort=True)
+ elif relations == "all":
+ relations = ["isA"] + list(rels.keys()) + ["inverse"]
+ elif isinstance(relations, str):
+ relations = relations.split(",")
+
+ nrelations = len(relations)
+ if nrelations == 0:
+ return
+
+ table = (
+ '<<table border="0" cellpadding="2" cellspacing="0" cellborder="0">'
+ )
+ label1 = [table]
+ label2 = [table]
+ for index, relation in enumerate(relations):
+ if (relation in rels) or (relation == "isA"):
+ label1.append(
+ f'<tr><td align="right" '
+ f'port="i{index}"><b>{relation}</b></td></tr>'
+ )
+ else:
+ label1.append(
+ f'<tr><td align="right" '
+ f'port="i{index}">{relation}</td></tr>'
+ )
+ label2.append(f'<tr><td port="i{index}"> </td></tr>')
+ label1.append("</table>>")
+ label2.append("</table>>")
+ self.dot.node("key1", label="\n".join(label1), shape="plaintext")
+ self.dot.node("key2", label="\n".join(label2), shape="plaintext")
+
+ rankdir = self.dot.graph_attr.get("rankdir", "TB")
+ constraint = "false" if rankdir in ("TB", "BT") else "true"
+ inv = rankdir in ("BT",)
+
+ for index in range(nrelations):
+ relation = (
+ relations[nrelations - 1 - index] if inv else relations[index]
+ )
+ if relation == "inverse":
+ kwargs = self.style.get("inverse", {}).copy()
+ else:
+ kwargs = self.get_edge_attrs(relation, {}).copy()
+ kwargs["constraint"] = constraint
+ with self.dot.subgraph(name=f"sub{index}") as subgraph:
+ subgraph.attr(rank="same")
+ if rankdir in ("BT", "LR"):
+ self.dot.edge(
+ f"key1:i{index}:e", f"key2:i{index}:w", **kwargs
+ )
+ else:
+ self.dot.edge(
+ f"key2:i{index}:w", f"key1:i{index}:e", **kwargs
+ )
+
add_missing_node(self, name, addnodes=None)
+
+
+¶Checks if name
corresponds to a missing node and add it if
+addnodes
is true.
Returns true if the node exists or is added, false otherwise.
+ +ontopy/graph.py
def add_missing_node(self, name, addnodes=None):
+ """Checks if `name` corresponds to a missing node and add it if
+ `addnodes` is true.
+
+ Returns true if the node exists or is added, false otherwise."""
+ addnodes = self.addnodes if addnodes is None else addnodes
+ entity = self.ontology[name] if isinstance(name, str) else name
+ label = get_label(entity)
+ if label not in self.nodes:
+ if addnodes:
+ self.add_node(entity, **self.style.get("added_node", {}))
+ else:
+ return False
+ return True
+
add_node(self, name, nodeattrs=None, **attrs)
+
+
+¶Add node with given name. attrs
are graphviz node attributes.
ontopy/graph.py
def add_node(self, name, nodeattrs=None, **attrs):
+ """Add node with given name. `attrs` are graphviz node attributes."""
+ entity = self.ontology[name] if isinstance(name, str) else name
+ label = get_label(entity)
+ if label not in self.nodes.union(self.excluded_nodes):
+ kwargs = self.get_node_attrs(
+ entity, nodeattrs=nodeattrs, attrs=attrs
+ )
+ if hasattr(entity, "iri"):
+ kwargs.setdefault("URL", entity.iri)
+ self.dot.node(label, label=label, **kwargs)
+ self.nodes.add(label)
+
add_nodes(self, names, nodeattrs, **attrs)
+
+
+¶Add nodes with given names. attrs
are graphviz node attributes.
ontopy/graph.py
def add_nodes(self, names, nodeattrs, **attrs):
+ """Add nodes with given names. `attrs` are graphviz node attributes."""
+ for name in names:
+ self.add_node(name, nodeattrs=nodeattrs, **attrs)
+
add_parents(self, name, levels=1, relations='isA', edgelabels=None, addnodes=False, addconstructs=False, **attrs)
+
+
+¶Add levels
levels of strict parents of entity name
.
ontopy/graph.py
def add_parents( # pylint: disable=too-many-arguments
+ self,
+ name,
+ levels=1,
+ relations="isA",
+ edgelabels=None,
+ addnodes=False,
+ addconstructs=False,
+ **attrs,
+):
+ """Add `levels` levels of strict parents of entity `name`."""
+
+ def addparents(entity, nodes, parents):
+ if nodes > 0:
+ for parent in entity.get_parents(strict=True):
+ parents.add(parent)
+ addparents(parent, nodes - 1, parents)
+
+ entity = self.ontology[name] if isinstance(name, str) else name
+ parents = set()
+ addparents(entity, levels, parents)
+ self.add_entities(
+ entities=parents,
+ relations=relations,
+ edgelabels=edgelabels,
+ addnodes=addnodes,
+ addconstructs=addconstructs,
+ **attrs,
+ )
+
add_source_edges(self, source, relations=None, edgelabels=None, addnodes=None, addconstructs=None, **attrs)
+
+
+¶Adds all relations originating from entity source
whose type
+are listed in relations
.
ontopy/graph.py
def add_source_edges( # pylint: disable=too-many-arguments,too-many-branches
+ self,
+ source,
+ relations=None,
+ edgelabels=None,
+ addnodes=None,
+ addconstructs=None,
+ **attrs,
+):
+ """Adds all relations originating from entity `source` who's type
+ are listed in `relations`."""
+ if relations is None:
+ relations = self.relations
+ elif isinstance(relations, str):
+ relations = set([relations])
+ else:
+ relations = set(relations)
+
+ edgelabels = self.edgelabels if edgelabels is None else edgelabels
+ addconstructs = (
+ self.addconstructs if addconstructs is None else addconstructs
+ )
+
+ entity = self.ontology[source] if isinstance(source, str) else source
+ label = get_label(entity)
+ for relation in entity.is_a:
+ # isA
+ if isinstance(
+ relation, (owlready2.ThingClass, owlready2.ObjectPropertyClass)
+ ):
+ if "all" in relations or "isA" in relations:
+ rlabel = get_label(relation)
+ # FIXME - we actually want to include individuals...
+ if isinstance(entity, owlready2.Thing):
+ continue
+ if relation not in entity.get_parents(strict=True):
+ continue
+ if not self.add_missing_node(relation, addnodes=addnodes):
+ continue
+ self.add_edge(
+ subject=label,
+ predicate="isA",
+ obj=rlabel,
+ edgelabel=edgelabels,
+ **attrs,
+ )
+
+ # restriction
+ elif isinstance(relation, owlready2.Restriction):
+ rname = get_label(relation.property)
+ if "all" in relations or rname in relations:
+ rlabel = f"{rname} {typenames[relation.type]}"
+ if isinstance(relation.value, owlready2.ThingClass):
+ obj = get_label(relation.value)
+ if not self.add_missing_node(relation.value, addnodes):
+ continue
+ elif (
+ isinstance(relation.value, owlready2.ClassConstruct)
+ and self.addconstructs
+ ):
+ obj = self.add_class_construct(relation.value)
+ else:
+ continue
+ pred = asstring(
+ relation, exclude_object=True, ontology=self.ontology
+ )
+ self.add_edge(
+ label, pred, obj, edgelabel=edgelabels, **attrs
+ )
+
+ # inverse
+ if isinstance(relation, owlready2.Inverse):
+ if "all" in relations or "inverse" in relations:
+ rlabel = get_label(relation)
+ if not self.add_missing_node(relation, addnodes=addnodes):
+ continue
+ if relation not in entity.get_parents(strict=True):
+ continue
+ self.add_edge(
+ subject=label,
+ predicate="inverse",
+ obj=rlabel,
+ edgelabel=edgelabels,
+ **attrs,
+ )
+
get_edge_attrs(self, predicate, attrs)
+
+
+¶Returns attributes for node or edge predicate
. attrs
overrides
+the default style.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
predicate |
+ str |
+ predicate to get attributes for |
+ required | +
attrs |
+ dict |
+ desired attributes to override default |
+ required | +
ontopy/graph.py
def get_edge_attrs(self, predicate: str, attrs: dict) -> dict:
+ """Returns attributes for node or edge `predicate`. `attrs` overrides
+ the default style.
+
+ Parameters:
+ predicate: predicate to get attributes for
+ attrs: desired attributes to override default
+ """
+ # given type
+ types = ("isA", "equivalent_to", "disjoint_with", "inverse_of")
+ if predicate in types:
+ kwargs = self.style.get(predicate, {}).copy()
+ else:
+ kwargs = {}
+ name = predicate.split(None, 1)[0]
+ match = re.match(r"Inverse\((.*)\)", name)
+ if match:
+ (name,) = match.groups()
+ attrs = attrs.copy()
+ for key, value in self.style.get("inverse", {}).items():
+ attrs.setdefault(key, value)
+ if not isinstance(name, str) or name in self.ontology:
+ entity = self.ontology[name] if isinstance(name, str) else name
+ relations = self.style.get("relations", {})
+ rels = set(
+ self.ontology[_] for _ in relations if _ in self.ontology
+ )
+ rattrs = self._relation_styles(entity, relations, rels)
+
+ # object property
+ if isinstance(
+ entity,
+ (owlready2.ObjectPropertyClass, owlready2.ObjectProperty),
+ ):
+ kwargs = self.style.get("default_relation", {}).copy()
+ kwargs.update(rattrs)
+ # data property
+ elif isinstance(
+ entity,
+ (owlready2.DataPropertyClass, owlready2.DataProperty),
+ ):
+ kwargs = self.style.get("default_dataprop", {}).copy()
+ kwargs.update(rattrs)
+ else:
+ raise TypeError(f"Unknown entity type: {entity!r}")
+ kwargs.update(self.style.get("edges", {}).get(predicate, {}))
+ kwargs.update(attrs)
+ return kwargs
+
get_figsize(self)
+
+
+¶Returns the default figure size (width, height) in points.
+ +ontopy/graph.py
def get_figsize(self):
+ """Returns the default figure size (width, height) in points."""
+ with tempfile.TemporaryDirectory() as tmpdir:
+ tmpfile = os.path.join(tmpdir, "graph.svg")
+ self.save(tmpfile)
+ xml = ET.parse(tmpfile)
+ svg = xml.getroot()
+ width = svg.attrib["width"]
+ height = svg.attrib["height"]
+ if not width.endswith("pt"):
+ # ensure that units are in points
+ raise ValueError(
+ "The width attribute should always be given in 'pt', "
+ f"but it is: {width}"
+ )
+
+ def asfloat(string):
+ return float(re.match(r"^[\d.]+", string).group())
+
+ return asfloat(width), asfloat(height)
+
get_node_attrs(self, name, nodeattrs, attrs)
+
+
+¶Returns attributes for node or edge name
. attrs
overrides
+the default style.
ontopy/graph.py
def get_node_attrs(self, name, nodeattrs, attrs):
+ """Returns attributes for node or edge `name`. `attrs` overrides
+ the default style."""
+ entity = self.ontology[name] if isinstance(name, str) else name
+ label = get_label(entity)
+ # class
+ if isinstance(entity, owlready2.ThingClass):
+ if entity.is_defined:
+ kwargs = self.style.get("defined_class", {})
+ else:
+ kwargs = self.style.get("class", {})
+ # class construct
+ elif isinstance(entity, owlready2.ClassConstruct):
+ kwargs = self.style.get("class_construct", {})
+ # individual
+ elif isinstance(entity, owlready2.Thing):
+ kwargs = self.style.get("individual", {})
+ # object property
+ elif isinstance(entity, owlready2.ObjectPropertyClass):
+ kwargs = self.style.get("object_property", {})
+ # data property
+ elif isinstance(entity, owlready2.DataPropertyClass):
+ kwargs = self.style.get("data_property", {})
+ # annotation property
+ elif isinstance(entity, owlready2.AnnotationPropertyClass):
+ kwargs = self.style.get("annotation_property", {})
+ else:
+ raise TypeError(f"Unknown entity type: {entity!r}")
+ kwargs = kwargs.copy()
+ kwargs.update(self.style.get("nodes", {}).get(label, {}))
+ if nodeattrs:
+ kwargs.update(nodeattrs.get(label, {}))
+ kwargs.update(attrs)
+ return kwargs
+
get_relations(self, sort=True)
+
+
+¶Returns a set of relations in current graph. If sort
is true,
+a sorted list is returned.
ontopy/graph.py
def get_relations(self, sort=True):
+ """Returns a set of relations in current graph. If `sort` is true,
+ a sorted list is returned."""
+ relations = set()
+ for _, predicate, _ in self.edges:
+ if predicate.startswith("Inverse"):
+ relations.add("inverse")
+ match = re.match(r"Inverse\((.+)\)", predicate)
+ if match is None:
+ raise ValueError(
+ "Could unexpectedly not find the inverse relation "
+ f"just added in: {predicate}"
+ )
+ relations.add(match.groups()[0])
+ else:
+ relations.add(predicate.split(None, 1)[0])
+
+ # Sort, but place 'isA' first and 'inverse' last
+ if sort:
+ start, end = [], []
+ if "isA" in relations:
+ relations.remove("isA")
+ start.append("isA")
+ if "inverse" in relations:
+ relations.remove("inverse")
+ end.append("inverse")
+ relations = start + sorted(relations) + end
+
+ return relations
+
save(self, filename, fmt=None, **kwargs)
+
+
+¶Saves graph to filename
. If format is not given, it is
+inferred from filename
.
ontopy/graph.py
def save(self, filename, fmt=None, **kwargs):
+ """Saves graph to `filename`. If format is not given, it is
+ inferred from `filename`."""
+ base = os.path.splitext(filename)[0]
+ fmt = get_format(filename, default="svg", fmt=fmt)
+ kwargs.setdefault("cleanup", True)
+ if fmt in ("graphviz", "gv"):
+ if "dictionary" in kwargs:
+ self.dot.save(filename, dictionary=kwargs["dictionary"])
+ else:
+ self.dot.save(filename)
+ else:
+ fmt = kwargs.pop("format", fmt)
+ self.dot.render(base, format=fmt, **kwargs)
+
view(self)
+
+
+¶Shows the graph in a viewer.
+ +ontopy/graph.py
def view(self):
+ """Shows the graph in a viewer."""
+ self.dot.view(cleanup=True)
+
check_module_dependencies(modules, verbose=True)
+
+
+¶Check module dependencies and return a copy of modules with +redundant dependencies removed.
+If verbose
is true, warnings are printed for each module that has a redundant dependency.
If modules
is given, it should be a dict returned by
+get_module_dependencies().
ontopy/graph.py
def check_module_dependencies(modules, verbose=True):
+ """Check module dependencies and return a copy of modules with
+ redundant dependencies removed.
+
+    If `verbose` is true, warnings are printed for each module that has a redundant dependency.
+
+ If `modules` is given, it should be a dict returned by
+ get_module_dependencies().
+ """
+ visited = set()
+
+ def get_deps(iri, excl=None):
+ """Returns a set with all dependencies of `iri`, excluding `excl` and
+ its dependencies."""
+ if iri in visited:
+ return set()
+ visited.add(iri)
+ deps = set()
+ for dependency in modules[iri]:
+ if dependency != excl:
+ deps.add(dependency)
+ deps.update(get_deps(dependency))
+ return deps
+
+ mods = {}
+ redundant = []
+ for iri, deps in modules.items():
+ if not deps:
+ mods[iri] = set()
+ for dep in deps:
+ if dep in get_deps(iri, dep):
+ redundant.append((iri, dep))
+ elif iri in mods:
+ mods[iri].add(dep)
+ else:
+ mods[iri] = set([dep])
+
+ if redundant and verbose:
+ print("** Warning: Redundant module dependency:")
+ for iri, dep in redundant:
+ print(f"{iri} -> {dep}")
+
+ return mods
+
cytoscape_style(style=None)
+
+
+¶Get list of color, style and fills.
+ +ontopy/graph.py
def cytoscape_style(style=None): # pylint: disable=too-many-branches
+ """Get list of color, style and fills."""
+ if not style:
+ style = _default_style
+ colours = {}
+ styles = {}
+ fill = {}
+ for key, value in style.items():
+ if isinstance(value, dict):
+ if "color" in value:
+ colours[key] = value["color"]
+ else:
+ colours[key] = "black"
+ if "style" in value:
+ styles[key] = value["style"]
+ else:
+ styles[key] = "solid"
+ if "arrowhead" in value:
+ if value["arrowhead"] == "empty":
+ fill[key] = "hollow"
+ else:
+ fill[key] = "filled"
+
+ for key, value in style.get("relations", {}).items():
+ if isinstance(value, dict):
+ if "color" in value:
+ colours[key] = value["color"]
+ else:
+ colours[key] = "black"
+ if "style" in value:
+ styles[key] = value["style"]
+ else:
+ styles[key] = "solid"
+ if "arrowhead" in value:
+ if value["arrowhead"] == "empty":
+ fill[key] = "hollow"
+ else:
+ fill[key] = "filled"
+ return [colours, styles, fill]
+
cytoscapegraph(graph, onto=None, infobox=None, force=False)
+
+
¶Returns an instance of icytoscape-figure for an
+instance Graph of OntoGraph, the accompanying ontology
+is required for mouse actions.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
graph |
+ OntoGraph |
+ graph generated with OntoGraph with edgelabels=True. |
+ required | +
onto |
+ Optional[ontopy.ontology.Ontology] |
+ ontology to be used for mouse actions. |
+ None |
+
infobox |
+ str |
+ "left" or "right". Placement of infbox with + respect to graph. |
+ None |
+
force |
+ bool |
+ force generate graph without correct edgelabels. |
+ False |
+
Returns:
+Type | +Description | +
---|---|
GridspecLayout |
+ cytoscapewidget with graph and infobox to be visualized +in jupyter lab. |
+
ontopy/graph.py
def cytoscapegraph(
+ graph: OntoGraph,
+ onto: Optional[Ontology] = None,
+ infobox: str = None,
+ force: bool = False,
+) -> "GridspecLayout":
+ # pylint: disable=too-many-locals,too-many-statements
+ """Returns and instance of icytoscape-figure for an
+ instance Graph of OntoGraph, the accompanying ontology
+ is required for mouse actions.
+ Args:
+ graph: graph generated with OntoGraph with edgelabels=True.
+ onto: ontology to be used for mouse actions.
+        infobox: "left" or "right". Placement of infobox with
+ respect to graph.
+ force: force generate graph without correct edgelabels.
+ Returns:
+ cytoscapewidget with graph and infobox to be visualized
+ in jupyter lab.
+
+ """
+ # pylint: disable=import-error,import-outside-toplevel
+ from ipywidgets import Output, VBox, GridspecLayout
+ from IPython.display import display, Image
+ from pathlib import Path
+ import networkx as nx
+ import pydotplus
+ import ipycytoscape
+ from networkx.readwrite.json_graph import cytoscape_data
+
+ # Define the styles, this has to be aligned with the graphviz values
+ dotplus = pydotplus.graph_from_dot_data(graph.dot.source)
+ # if graph doesn't have multiedges, use dotplus.set_strict(true)
+ pydot_graph = nx.nx_pydot.from_pydot(dotplus)
+
+ colours, styles, fill = cytoscape_style()
+
+ data = cytoscape_data(pydot_graph)["elements"]
+ for datum in data["edges"]:
+ try:
+ datum["data"]["label"] = (
+ datum["data"]["label"].rsplit(" ", 1)[0].lstrip('"')
+ )
+ except KeyError as err:
+ if not force:
+ raise EMMOntoPyException(
+ "Edge label is not defined. Are you sure that the OntoGraph"
+ "instance you provided was generated with "
+ "´edgelabels=True´?"
+ ) from err
+ warnings.warn(
+ "ARROWS WILL NOT BE DISPLAYED CORRECTLY. "
+ "Edge label is not defined. Are you sure that the OntoGraph "
+ "instance you provided was generated with ´edgelabels=True´?"
+ )
+ datum["data"]["label"] = ""
+
+ lab = datum["data"]["label"].replace("Inverse(", "").rstrip(")")
+ try:
+ datum["data"]["colour"] = colours[lab]
+ except KeyError:
+ datum["data"]["colour"] = "black"
+ try:
+ datum["data"]["style"] = styles[lab]
+ except KeyError:
+ datum["data"]["style"] = "solid"
+ if datum["data"]["label"].startswith("Inverse("):
+ datum["data"]["targetarrow"] = "diamond"
+ datum["data"]["sourcearrow"] = "none"
+ else:
+ datum["data"]["targetarrow"] = "triangle"
+ datum["data"]["sourcearrow"] = "none"
+ try:
+ datum["data"]["fill"] = fill[lab]
+ except KeyError:
+ datum["data"]["fill"] = "filled"
+
+ cytofig = ipycytoscape.CytoscapeWidget()
+ cytofig.graph.add_graph_from_json(data, directed=True)
+
+ cytofig.set_style(
+ [
+ {
+ "selector": "node",
+ "css": {
+ "content": "data(label)",
+ # "text-valign": "center",
+ # "color": "white",
+ # "text-outline-width": 2,
+ # "text-outline-color": "red",
+ "background-color": "blue",
+ },
+ },
+ {"selector": "node:parent", "css": {"background-opacity": 0.333}},
+ {
+ "selector": "edge",
+ "style": {
+ "width": 2,
+ "line-color": "data(colour)",
+ # "content": "data(label)"",
+ "line-style": "data(style)",
+ },
+ },
+ {
+ "selector": "edge.directed",
+ "style": {
+ "curve-style": "bezier",
+ "target-arrow-shape": "data(targetarrow)",
+ "target-arrow-color": "data(colour)",
+ "target-arrow-fill": "data(fill)",
+ "mid-source-arrow-shape": "data(sourcearrow)",
+ "mid-source-arrow-color": "data(colour)",
+ },
+ },
+ {
+ "selector": "edge.multiple_edges",
+ "style": {"curve-style": "bezier"},
+ },
+ {
+ "selector": ":selected",
+ "css": {
+ "background-color": "black",
+ "line-color": "black",
+ "target-arrow-color": "black",
+ "source-arrow-color": "black",
+ "text-outline-color": "black",
+ },
+ },
+ ]
+ )
+
+ if onto is not None:
+ out = Output(layout={"border": "1px solid black"})
+
+ def log_clicks(node):
+ with out:
+ print((onto.get_by_label(node["data"]["label"])))
+ parent = onto.get_by_label(node["data"]["label"]).get_parents()
+ print(f"parents: {parent}")
+ try:
+ elucidation = onto.get_by_label(
+ node["data"]["label"]
+ ).elucidation
+ print(f"elucidation: {elucidation[0]}")
+ except (AttributeError, IndexError):
+ pass
+
+ try:
+ annotations = onto.get_by_label(
+ node["data"]["label"]
+ ).annotations
+ for _ in annotations:
+ print(f"annotation: {_}")
+ except AttributeError:
+ pass
+
+ # Try does not work...
+ try:
+ iri = onto.get_by_label(node["data"]["label"]).iri
+ print(f"iri: {iri}")
+ except (AttributeError, IndexError):
+ pass
+ try:
+ fig = node["data"]["label"]
+ if os.path.exists(Path(fig + ".png")):
+ display(Image(fig + ".png", width=100))
+ elif os.path.exists(Path(fig + ".jpg")):
+ display(Image(fig + ".jpg", width=100))
+ except (AttributeError, IndexError):
+ pass
+ out.clear_output(wait=True)
+
+ def log_mouseovers(node):
+ with out:
+ print(onto.get_by_label(node["data"]["label"]))
+ # print(f'mouseover: {pformat(node)}')
+ out.clear_output(wait=True)
+
+ cytofig.on("node", "click", log_clicks)
+ cytofig.on("node", "mouseover", log_mouseovers) # , remove=True)
+ cytofig.on("node", "mouseout", out.clear_output(wait=True))
+ grid = GridspecLayout(1, 3, height="400px")
+ if infobox == "left":
+ grid[0, 0] = out
+ grid[0, 1:] = cytofig
+ elif infobox == "right":
+ grid[0, 0:-1] = cytofig
+ grid[0, 2] = out
+ else:
+ return VBox([cytofig, out])
+ return grid
+
+ return cytofig
+
filter_classes(classes, included_namespaces=(), included_ontologies=())
+
+
+¶Filter out classes whos namespace is not in included_namespaces
+or whose ontology name is not in one of the ontologies in
+included_ontologies
.
classes
should be a sequence of classes.
ontopy/graph.py
def filter_classes(classes, included_namespaces=(), included_ontologies=()):
+ """Filter out classes whos namespace is not in `included_namespaces`
+ or whos ontology name is not in one of the ontologies in
+ `included_ontologies`.
+
+ `classes` should be a sequence of classes.
+ """
+ filtered = set(classes)
+ if included_namespaces:
+ filtered = set(
+ c for c in filtered if c.namespace.name in included_namespaces
+ )
+ if included_ontologies:
+ filtered = set(
+ c
+ for c in filtered
+ if c.namespace.ontology.name in included_ontologies
+ )
+ return filtered
+
get_module_dependencies(iri_or_onto, strip_base=None)
+
+
+¶Reads iri_or_onto
and returns a dict mapping ontology names to a
+list of ontologies that they depend on.
If strip_base
is true, the base IRI is stripped from ontology
+names. If it is a string, it lstrip'ped from the base iri.
ontopy/graph.py
def get_module_dependencies(iri_or_onto, strip_base=None):
+ """Reads `iri_or_onto` and returns a dict mapping ontology names to a
+    list of ontologies that they depend on.
+
+ If `strip_base` is true, the base IRI is stripped from ontology
+ names. If it is a string, it lstrip'ped from the base iri.
+ """
+ from ontopy.ontology import ( # pylint: disable=import-outside-toplevel
+ get_ontology,
+ )
+
+ if isinstance(iri_or_onto, str):
+ onto = get_ontology(iri_or_onto)
+ onto.load()
+ else:
+ onto = iri_or_onto
+
+ modules = {onto.base_iri: set()}
+
+ def strip(base_iri):
+ if isinstance(strip_base, str):
+ return base_iri.lstrip(strip_base)
+ if strip_base:
+ return base_iri.strip(onto.base_iri)
+ return base_iri
+
+ visited = set()
+
+ def setmodules(onto):
+ for imported_onto in onto.imported_ontologies:
+ if onto.base_iri in modules:
+ modules[strip(onto.base_iri)].add(strip(imported_onto.base_iri))
+ else:
+ modules[strip(onto.base_iri)] = set(
+ [strip(imported_onto.base_iri)]
+ )
+ if imported_onto.base_iri not in modules:
+ modules[strip(imported_onto.base_iri)] = set()
+ if imported_onto not in visited:
+ visited.add(imported_onto)
+ setmodules(imported_onto)
+
+ setmodules(onto)
+ return modules
+
plot_modules(src, filename=None, fmt=None, show=False, strip_base=None, ignore_redundant=True)
+
+
+¶Plot module dependency graph for src
and return a graph object.
Here src
may be an IRI, a path to the ontology or a dict returned by
+get_module_dependencies().
If filename
is given, write the graph to this file.
If fmt
is None, the output format is inferred from filename
.
If show
is true, the graph is displayed.
strip_base
is passed on to get_module_dependencies() if src
is not
+a dict.
If ignore_redundant
is true, redundant dependencies are not plotted.
ontopy/graph.py
def plot_modules( # pylint: disable=too-many-arguments
+ src,
+ filename=None,
+ fmt=None,
+ show=False,
+ strip_base=None,
+ ignore_redundant=True,
+):
+ """Plot module dependency graph for `src` and return a graph object.
+
+    Here `src` may be an IRI, a path to the ontology or a dict returned by
+ get_module_dependencies().
+
+ If `filename` is given, write the graph to this file.
+
+ If `fmt` is None, the output format is inferred from `filename`.
+
+ If `show` is true, the graph is displayed.
+
+ `strip_base` is passed on to get_module_dependencies() if `src` is not
+ a dict.
+
+ If `ignore_redundant` is true, redundant dependencies are not plotted.
+ """
+ if isinstance(src, dict):
+ modules = src
+ else:
+ modules = get_module_dependencies(src, strip_base=strip_base)
+
+ if ignore_redundant:
+ modules = check_module_dependencies(modules, verbose=False)
+
+ dot = graphviz.Digraph(comment="Module dependencies")
+ dot.attr(rankdir="TB")
+ dot.node_attr.update(
+ style="filled", fillcolor="lightblue", shape="box", edgecolor="blue"
+ )
+ dot.edge_attr.update(arrowtail="open", dir="back")
+
+ for iri in modules.keys():
+ iriname = iri.split(":", 1)[1]
+ dot.node(iriname, label=iri, URL=iri)
+
+ for iri, deps in modules.items():
+ for dep in deps:
+ iriname = iri.split(":", 1)[1]
+ depname = dep.split(":", 1)[1]
+ dot.edge(depname, iriname)
+
+ if filename:
+ base, ext = os.path.splitext(filename)
+ if fmt is None:
+ fmt = ext.lstrip(".")
+ dot.render(base, format=fmt, view=False, cleanup=True)
+
+ if show:
+ dot.view(cleanup=True)
+
+ return dot
+
Evaluate Manchester syntax
+This module compiles restrictions and logical constructs in Manchester
+syntax into Owlready2 classes. The main function in this module is
+manchester.evaluate()
, see its docstring for usage example.
Pyparsing is used under the hood for parsing.
+ + + +
+ManchesterError (EMMOntoPyException)
+
+
+
+
+¶Raised on invalid Manchester notation.
+ +ontopy/manchester.py
class ManchesterError(EMMOntoPyException):
+ """Raised on invalid Manchester notation."""
+
evaluate(ontology, expr)
+
+
+¶Evaluate expression in Manchester syntax.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
ontology |
+ Ontology |
+ The ontology within which the expression will be evaluated. |
+ required | +
expr |
+ str |
+ Manchester expression to be evaluated. |
+ required | +
Returns:
+Type | +Description | +
---|---|
Construct |
+ An Owlready2 construct that corresponds to the expression. |
+
Examples:
+ +++++++from ontopy.manchester import evaluate +from ontopy import get_ontology +emmo = get_ontology().load()
+restriction = evaluate(emmo, 'hasPart some Atom') +cls = evaluate(emmo, 'Atom') +expr = evaluate(emmo, 'Atom or Molecule')
+
Note
+Logical expressions (with not
, and
and or
) are supported as
+well as object property restrictions. For data properties, only
+value restrictions are supported so far.
ontopy/manchester.py
def evaluate(ontology: owlready2.Ontology, expr: str) -> owlready2.Construct:
+ """Evaluate expression in Manchester syntax.
+
+ Args:
+ ontology: The ontology within which the expression will be evaluated.
+ expr: Manchester expression to be evaluated.
+
+ Returns:
+ An Owlready2 construct that corresponds to the expression.
+
+ Example:
+ >>> from ontopy.manchester import evaluate
+ >>> from ontopy import get_ontology
+ >>> emmo = get_ontology().load()
+
+ >>> restriction = evaluate(emmo, 'hasPart some Atom')
+ >>> cls = evaluate(emmo, 'Atom')
+ >>> expr = evaluate(emmo, 'Atom or Molecule')
+
+ Note:
+ Logical expressions (with `not`, `and` and `or`) are supported as
+        well as object property restrictions. For data properties, only
+        value restrictions are supported so far.
+ """
+
+ # pylint: disable=invalid-name
+ def _parse_literal(r):
+ """Compiles literal to Owlready2 type."""
+ if r.language:
+ v = owlready2.locstr(r.string, r.language)
+ elif r.number:
+ v = r.number
+ else:
+ v = r.string
+ return v
+
+ # pylint: disable=invalid-name,no-else-return,too-many-return-statements
+ # pylint: disable=too-many-branches
+ def _eval(r):
+ """Recursively evaluate expression produced by pyparsing into an
+ Owlready2 construct."""
+
+ def fneg(x):
+ """Negates the argument if `neg` is true."""
+ return owlready2.Not(x) if neg else x
+
+ if isinstance(r, str): # r is atomic, returns its owlready2 repr
+ return ontology[r]
+ neg = False # whether the expression starts with "not"
+ while r[0] == "not":
+ r.pop(0) # strip off the "not" and proceed
+ neg = not neg
+
+ if len(r) == 1: # r is either a atomic or a parenthesised
+ # subexpression that should be further evaluated
+ if isinstance(r[0], str):
+ return fneg(ontology[r[0]])
+ else:
+ return fneg(_eval(r[0]))
+ elif r.op: # r contains a logical operator: and/or
+ ops = {"and": owlready2.And, "or": owlready2.Or}
+ op = ops[r.op]
+ if len(r) == 3:
+ return op([fneg(_eval(r[0])), _eval(r[2])])
+ else:
+ arg1 = fneg(_eval(r[0]))
+ r.pop(0)
+ r.pop(0)
+ return op([arg1, _eval(r)])
+ elif r.objProp: # r is a restriction
+ if r[0] == "inverse":
+ r.pop(0)
+ prop = owlready2.Inverse(ontology[r[0]])
+ else:
+ prop = ontology[r[0]]
+ rtype = r[1]
+ if rtype == "Self":
+ return fneg(prop.has_self())
+ r.pop(0)
+ r.pop(0)
+ f = getattr(prop, rtype)
+ if rtype == "value":
+ return fneg(f(_eval(r)))
+ elif rtype in ("some", "only"):
+ return fneg(f(_eval(r)))
+ elif rtype in ("min", "max", "exactly"):
+ cardinality = r.pop(0)
+ return fneg(f(cardinality, _eval(r)))
+ else:
+ raise ManchesterError(f"invalid restriction type: {rtype}")
+ elif r.dataProp: # r is a data property restriction
+ prop = ontology[r[0]]
+ rtype = r[1]
+ r.pop(0)
+ r.pop(0)
+ f = getattr(prop, rtype)
+ if rtype == "value":
+ return f(_parse_literal(r))
+ else:
+ raise ManchesterError(
+ f"unimplemented data property restriction: "
+ f"{prop} {rtype} {r}"
+ )
+ else:
+ raise ManchesterError(f"invalid expression: {r}")
+
+ grammar = manchester_expression()
+ return _eval(grammar.parseString(expr, parseAll=True))
+
manchester_expression()
+
+
+¶Returns pyparsing grammar for a Manchester expression.
+This function is mostly for internal use.
+See also: https://www.w3.org/TR/owl2-manchester-syntax/
+ +ontopy/manchester.py
def manchester_expression():
+ """Returns pyparsing grammar for a Manchester expression.
+
+ This function is mostly for internal use.
+
+ See also: https://www.w3.org/TR/owl2-manchester-syntax/
+ """
+ # pylint: disable=global-statement,invalid-name,too-many-locals
+ global GRAMMAR
+ if GRAMMAR:
+ return GRAMMAR
+
+ # Subset of the Manchester grammar for expressions
+ # It is based on https://www.w3.org/TR/owl2-manchester-syntax/
+ # but allows logical constructs within restrictions (like Protege)
+ ident = pp.Word(pp.alphas + "_:-", pp.alphanums + "_:-", asKeyword=True)
+ uint = pp.Word(pp.nums)
+ alphas = pp.Word(pp.alphas)
+ string = pp.Word(pp.alphanums + ":")
+ quotedString = (
+ pp.QuotedString('"""', multiline=True) | pp.QuotedString('"')
+ )("string")
+ typedLiteral = pp.Combine(quotedString + "^^" + string("datatype"))
+ stringLanguageLiteral = pp.Combine(quotedString + "@" + alphas("language"))
+ stringLiteral = quotedString
+ numberLiteral = pp.pyparsing_common.number("number")
+ literal = (
+ typedLiteral | stringLanguageLiteral | stringLiteral | numberLiteral
+ )
+ logOp = pp.one_of(["and", "or"], asKeyword=True)
+ expr = pp.Forward()
+ restriction = pp.Forward()
+ primary = pp.Keyword("not")[...] + (
+ restriction | ident("cls") | pp.nested_expr("(", ")", expr)
+ )
+ objPropExpr = (
+ pp.Literal("inverse")
+ + pp.Suppress("(")
+ + ident("objProp")
+ + pp.Suppress(")")
+ | pp.Literal("inverse") + ident("objProp")
+ | ident("objProp")
+ )
+ dataPropExpr = ident("dataProp")
+ restriction <<= (
+ objPropExpr + pp.Keyword("some") + expr
+ | objPropExpr + pp.Keyword("only") + expr
+ | objPropExpr + pp.Keyword("Self")
+ | objPropExpr + pp.Keyword("value") + ident("individual")
+ | objPropExpr + pp.Keyword("min") + uint + expr
+ | objPropExpr + pp.Keyword("max") + uint + expr
+ | objPropExpr + pp.Keyword("exactly") + uint + expr
+ | dataPropExpr + pp.Keyword("value") + literal
+ )
+ expr <<= primary + (logOp("op") + expr)[...]
+
+ GRAMMAR = expr
+ return expr
+
A nested dict with both attribute and item access.
+NA stands for Nested and Attribute.
+ + + +
+NADict
+
+
+
+¶A nested dict with both attribute and item access.
It is intended to be used with keys that are valid Python +identifiers. However, except for string keys containing a dot, +there are actually no hard limitations. If a key equals an existing +attribute name, attribute access is of course not possible.
+Nested items can be accessed via a dot notation, as shown in the +example below.
+++++++n = NADict(a=1, b=NADict(c=3, d=4)) +n['a'] +1 +n.a +1 +n['b.c'] +3 +n.b.c +3 +n['b.e'] = 5 +n.b.e +5
+
_dict : dict + Dictionary holding the actual items.
+ +ontopy/nadict.py
class NADict:
+ """A nested dict with both attribute and item access.
+
+ It is intended to be used with keys that are valid Python
+ identifiers. However, except for string keys containing a dot,
+ there are actually no hard limitations. If a key equals an existing
+    attribute name, attribute access is of course not possible.
+
+ Nested items can be accessed via a dot notation, as shown in the
+ example below.
+
+ Examples
+ --------
+ >>> n = NADict(a=1, b=NADict(c=3, d=4))
+ >>> n['a']
+ 1
+ >>> n.a
+ 1
+ >>> n['b.c']
+ 3
+ >>> n.b.c
+ 3
+ >>> n['b.e'] = 5
+ >>> n.b.e
+ 5
+
+ Attributes
+ ----------
+ _dict : dict
+        Dictionary holding the actual items.
+ """
+
+ def __init__(self, *args, **kw):
+ object.__setattr__(self, "_dict", {})
+ self.update(*args, **kw)
+
+ def __getitem__(self, key):
+ if "." in key:
+ key1, key2 = key.split(".", 1)
+ return self._dict[key1][key2]
+ return self._dict[key]
+
+ def __setitem__(self, key, value):
+ if key in (
+ "clear",
+ "copy",
+ "fromkeys",
+ "get",
+ "items",
+ "keys",
+ "pop",
+ "popitem",
+ "setdefault",
+ "update",
+ "values",
+ ):
+ raise ValueError(
+ f"invalid key {key!r}: must not override supported dict method"
+ " names"
+ )
+
+ if "." in key:
+ key1, key2 = key.split(".", 1)
+ if key1 not in self._dict:
+ self._dict[key1] = NADict()
+ self._dict[key1][key2] = value
+ elif key in self._dict:
+ if isinstance(self._dict[key], NADict):
+ self._dict[key].update(value)
+ else:
+ self._dict[key] = value
+ else:
+ if isinstance(value, Mapping):
+ self._dict[key] = NADict(value)
+ else:
+ self._dict[key] = value
+
+ def __delitem__(self, key):
+ if "." in key:
+ key1, key2 = key.split(".", 1)
+ del self._dict[key1][key2]
+ else:
+ del self._dict[key]
+
+ def __getattr__(self, key):
+ if key not in self._dict:
+ raise AttributeError(f"No such key: {key}")
+ return self._dict[key]
+
+ def __setattr__(self, key, value):
+ if key in self._dict:
+ self._dict[key] = value
+ else:
+ object.__setattr__(self, key, value)
+
+ def __delattr__(self, key):
+ if key in self._dict:
+ del self._dict[key]
+ else:
+ object.__delattr__(self, key)
+
+ def __len__(self):
+ return len(self._dict)
+
+ def __contains__(self, key):
+ if "." in key:
+ key1, key2 = key.split(".", 1)
+ return key2 in self._dict[key1]
+ return key in self._dict
+
+ def __iter__(self, prefix=""):
+ for key, value in self._dict.items():
+ key = f"{prefix}.{key}" if prefix else key
+ if isinstance(value, NADict):
+ yield from value.__iter__(key)
+ else:
+ yield key
+
+ def __repr__(self):
+ return (
+ f"{self.__class__.__name__}("
+ f"{', '.join(f'{key}={value!r}' for key, value in self._dict.items())})" # pylint: disable=line-too-long
+ )
+
+ def clear(self):
+ """Clear all keys."""
+ self._dict.clear()
+
+ def copy(self):
+ """Returns a deep copy of self."""
+ return copy.deepcopy(self)
+
+ @staticmethod
+ def fromkeys(iterable, value=None):
+ """Returns a new NADict with keys from `iterable` and values
+ set to `value`."""
+ res = NADict()
+ for key in iterable:
+ res[key] = value
+ return res
+
+ def get(self, key, default=None):
+ """Returns the value for `key` if `key` is in self, else return
+ `default`."""
+ if "." in key:
+ key1, key2 = key.split(".", 1)
+ return self._dict[key1].get(key2, default)
+ return self._dict.get(key, default)
+
+ def items(self, prefix=""):
+ """Returns an iterator over all items as (key, value) pairs."""
+ for key, value in self._dict.items():
+ key = f"{prefix}.{key}" if prefix else key
+ if isinstance(value, NADict):
+ yield from value.items(key)
+ else:
+ yield (key, value)
+
+ def keys(self, prefix=""):
+ """Returns an iterator over all keys."""
+ for key, value in self._dict.items():
+ key = f"{prefix}.{key}" if prefix else key
+ if isinstance(value, NADict):
+ yield from value.keys(key)
+ else:
+ yield key
+
+ def pop(self, key, default=None):
+        """Removes `key` and returns corresponding value. If `key` is not
+ found, `default` is returned if given, otherwise KeyError is
+ raised."""
+ if "." in key:
+ key1, key2 = key.split(".", 1)
+ return self._dict[key1].pop(key2, default)
+ return self._dict.pop(key, default)
+
+ def popitem(self, prefix=""):
+ """Removes and returns some (key, value). Raises KeyError if empty."""
+ item = self._dict.popitem()
+ if isinstance(item, NADict):
+ key, value = item
+ item2 = item.popitem(key)
+ self._dict[key] = value
+ return item2
+ key, value = self._dict.popitem()
+ key = f"{prefix}.{key}" if prefix else key
+ return (key, value)
+
+ def setdefault(self, key, value=None):
+ """Inserts `key` and `value` pair if key is not found.
+
+ Returns the new value for `key`."""
+ if "." in key:
+ key1, key2 = key.split(".", 1)
+ return self._dict[key1].setdefault(key2, value)
+ return self._dict.setdefault(key, value)
+
+ def update(self, *args, **kwargs):
+ """Updates self with dict/iterable from `args` and keyword arguments
+        from `kwargs`."""
+ for arg in args:
+ if hasattr(arg, "keys"):
+ for _ in arg:
+ self[_] = arg[_]
+ else:
+ for key, value in arg:
+ self[key] = value
+ for key, value in kwargs.items():
+ self[key] = value
+
+ def values(self):
+        """Returns a set-like providing a view of all values."""
+ return self._dict.values()
+
clear(self)
+
+
+¶Clear all keys.
+ +ontopy/nadict.py
def clear(self):
+ """Clear all keys."""
+ self._dict.clear()
+
copy(self)
+
+
+¶Returns a deep copy of self.
+ +ontopy/nadict.py
def copy(self):
+ """Returns a deep copy of self."""
+ return copy.deepcopy(self)
+
fromkeys(iterable, value=None)
+
+
+ staticmethod
+
+
+¶Returns a new NADict with keys from iterable
and values
+set to value
.
ontopy/nadict.py
@staticmethod
+def fromkeys(iterable, value=None):
+ """Returns a new NADict with keys from `iterable` and values
+ set to `value`."""
+ res = NADict()
+ for key in iterable:
+ res[key] = value
+ return res
+
get(self, key, default=None)
+
+
+¶Returns the value for key
if key
is in self, else return
+default
.
ontopy/nadict.py
def get(self, key, default=None):
+ """Returns the value for `key` if `key` is in self, else return
+ `default`."""
+ if "." in key:
+ key1, key2 = key.split(".", 1)
+ return self._dict[key1].get(key2, default)
+ return self._dict.get(key, default)
+
items(self, prefix='')
+
+
+¶Returns an iterator over all items as (key, value) pairs.
+ +ontopy/nadict.py
def items(self, prefix=""):
+ """Returns an iterator over all items as (key, value) pairs."""
+ for key, value in self._dict.items():
+ key = f"{prefix}.{key}" if prefix else key
+ if isinstance(value, NADict):
+ yield from value.items(key)
+ else:
+ yield (key, value)
+
keys(self, prefix='')
+
+
+¶Returns an iterator over all keys.
+ +ontopy/nadict.py
def keys(self, prefix=""):
+ """Returns an iterator over all keys."""
+ for key, value in self._dict.items():
+ key = f"{prefix}.{key}" if prefix else key
+ if isinstance(value, NADict):
+ yield from value.keys(key)
+ else:
+ yield key
+
pop(self, key, default=None)
+
+
+¶Removed key
and returns corresponding value. If key
is not
+found, default
is returned if given, otherwise KeyError is
+raised.
ontopy/nadict.py
def pop(self, key, default=None):
+    """Removes `key` and returns corresponding value. If `key` is not
+ found, `default` is returned if given, otherwise KeyError is
+ raised."""
+ if "." in key:
+ key1, key2 = key.split(".", 1)
+ return self._dict[key1].pop(key2, default)
+ return self._dict.pop(key, default)
+
popitem(self, prefix='')
+
+
+¶Removes and returns some (key, value). Raises KeyError if empty.
+ +ontopy/nadict.py
def popitem(self, prefix=""):
+ """Removes and returns some (key, value). Raises KeyError if empty."""
+ item = self._dict.popitem()
+ if isinstance(item, NADict):
+ key, value = item
+ item2 = item.popitem(key)
+ self._dict[key] = value
+ return item2
+ key, value = self._dict.popitem()
+ key = f"{prefix}.{key}" if prefix else key
+ return (key, value)
+
setdefault(self, key, value=None)
+
+
+¶Inserts key
and value
pair if key is not found.
Returns the new value for key
.
ontopy/nadict.py
def setdefault(self, key, value=None):
+ """Inserts `key` and `value` pair if key is not found.
+
+ Returns the new value for `key`."""
+ if "." in key:
+ key1, key2 = key.split(".", 1)
+ return self._dict[key1].setdefault(key2, value)
+ return self._dict.setdefault(key, value)
+
update(self, *args, **kwargs)
+
+
+¶Updates self with dict/iterable from args
and keyword arguments
+from kw
.
ontopy/nadict.py
def update(self, *args, **kwargs):
+ """Updates self with dict/iterable from `args` and keyword arguments
+    from `kwargs`."""
+ for arg in args:
+ if hasattr(arg, "keys"):
+ for _ in arg:
+ self[_] = arg[_]
+ else:
+ for key, value in arg:
+ self[key] = value
+ for key, value in kwargs.items():
+ self[key] = value
+
values(self)
+
+
+¶Returns a set-like providing a view of all style values.
+ +ontopy/nadict.py
def values(self):
+    """Returns a set-like providing a view of all values."""
+ return self._dict.values()
+
A module for documenting ontologies.
+ + + +
+AttributeDict (dict)
+
+
+
+
+¶A dict with attribute access.
+Note that methods like key() and update() may be overridden.
+ +ontopy/ontodoc.py
class AttributeDict(dict):
+ """A dict with attribute access.
+
+ Note that methods like key() and update() may be overridden."""
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.__dict__ = self
+
+DocPP
+
+
+
+¶Documentation pre-processor.
+It supports the following features:
+Comment lines
+%% Comment line...
+
Insert header with given level
+%HEADER label [level=1]
+
Insert figure with optional caption and width. filepath
+ should be relative to basedir
. If width is 0, no width will
+ be specified.
%FIGURE filepath [caption='' width=0px]
+
Include other markdown files. Header levels may be shifted up or down with
+ shift
%INCLUDE filepath [shift=0]
+
Insert generated documentation for ontology entity. The header
+ level may be set with header_level
.
%ENTITY name [header_level=3]
+
Insert generated documentation for ontology branch name
. Options:
include_leaves: Whether to include leaves as end points + to the branch.
+%BRANCH name [header_level=3 terminated=1 include_leaves=0 + namespaces='' ontologies='']
+Insert generated figure of ontology branch name
. The figure
+ is written to path
. The default path is figdir
/name
,
+ where figdir
is given at class initiation. It is recommended
+ to exclude the file extension from path
. In this case, the
+ default figformat will be used (and easily adjusted to the
+ correct format required by the backend). leaves
may be a comma-
+ separated list of leaf node names.
%BRANCHFIG name [path='' caption='' terminated=1 include_leaves=1
+ strict_leaves=1, width=0px leaves='' relations=all
+ edgelabels=0 namespaces='' ontologies='']
+
This is a combination of the %HEADER and %BRANCHFIG directives.
+%BRANCHHEAD name [level=2 path='' caption='' terminated=1
+ include_leaves=1 width=0px leaves='']
+
This is a combination of the %HEADER, %BRANCHFIG and %BRANCH
+ directives. It inserts documentation of branch name
, with a
+ header followed by a figure and then documentation of each
+ element.
%BRANCHDOC name [level=2 path='' title='' caption='' terminated=1
+ strict_leaves=1 width=0px leaves='' relations='all'
+ rankdir='BT' legend=1 namespaces='' ontologies='']
+
Insert generated documentation for all entities of the given type.
+ Valid values of type
are: "classes", "individuals",
+ "object_properties", "data_properties", "annotations_properties"
%ALL type [header_level=3, namespaces='', ontologies='']
+
Insert generated figure of all entities of the given type.
+ Valid values of type
are: "classes", "object_properties" and
+ "data_properties".
%ALLFIG type
+
template : str + Input template. +ontodoc : OntoDoc instance + Instance of OntoDoc +basedir : str + Base directory for including relative file paths. +figdir : str + Default directory to store generated figures. +figformat : str + Default format for generated figures. +figscale : float + Default scaling of generated figures. +maxwidth : float + Maximum figure width. Figures larger than this will be rescaled. +imported : bool + Whether to include imported entities.
+ +ontopy/ontodoc.py
class DocPP: # pylint: disable=too-many-instance-attributes
+ """Documentation pre-processor.
+
+ It supports the following features:
+
+ * Comment lines
+
+ %% Comment line...
+
+ * Insert header with given level
+
+ %HEADER label [level=1]
+
+ * Insert figure with optional caption and width. `filepath`
+ should be relative to `basedir`. If width is 0, no width will
+ be specified.
+
+ %FIGURE filepath [caption='' width=0px]
+
+    * Include other markdown files. Header levels may be shifted up or
+      down with `shift`
+
+ %INCLUDE filepath [shift=0]
+
+ * Insert generated documentation for ontology entity. The header
+ level may be set with `header_level`.
+
+ %ENTITY name [header_level=3]
+
+ * Insert generated documentation for ontology branch `name`. Options:
+ - header_level: Header level.
+ - terminated: Whether to branch should be terminated at all branch
+ names in the final document.
+ - include_leaves: Whether to include leaves as end points
+ to the branch.
+
+ %BRANCH name [header_level=3 terminated=1 include_leaves=0
+ namespaces='' ontologies='']
+
+ * Insert generated figure of ontology branch `name`. The figure
+ is written to `path`. The default path is `figdir`/`name`,
+ where `figdir` is given at class initiation. It is recommended
+ to exclude the file extension from `path`. In this case, the
+ default figformat will be used (and easily adjusted to the
+ correct format required by the backend). `leaves` may be a comma-
+ separated list of leaf node names.
+
+ %BRANCHFIG name [path='' caption='' terminated=1 include_leaves=1
+ strict_leaves=1, width=0px leaves='' relations=all
+ edgelabels=0 namespaces='' ontologies='']
+
+ * This is a combination of the %HEADER and %BRANCHFIG directives.
+
+ %BRANCHHEAD name [level=2 path='' caption='' terminated=1
+ include_leaves=1 width=0px leaves='']
+
+ * This is a combination of the %HEADER, %BRANCHFIG and %BRANCH
+ directives. It inserts documentation of branch `name`, with a
+ header followed by a figure and then documentation of each
+ element.
+
+ %BRANCHDOC name [level=2 path='' title='' caption='' terminated=1
+ strict_leaves=1 width=0px leaves='' relations='all'
+ rankdir='BT' legend=1 namespaces='' ontologies='']
+
+ * Insert generated documentation for all entities of the given type.
+ Valid values of `type` are: "classes", "individuals",
+ "object_properties", "data_properties", "annotations_properties"
+
+ %ALL type [header_level=3, namespaces='', ontologies='']
+
+ * Insert generated figure of all entities of the given type.
+ Valid values of `type` are: "classes", "object_properties" and
+ "data_properties".
+
+ %ALLFIG type
+
+ Parameters
+ ----------
+ template : str
+ Input template.
+ ontodoc : OntoDoc instance
+ Instance of OntoDoc
+ basedir : str
+ Base directory for including relative file paths.
+ figdir : str
+ Default directory to store generated figures.
+ figformat : str
+ Default format for generated figures.
+ figscale : float
+ Default scaling of generated figures.
+ maxwidth : float
+ Maximum figure width. Figures larger than this will be rescaled.
+ imported : bool
+ Whether to include imported entities.
+ """
+
+    # FIXME - this class should be refactored:
+    # * Instead of rescanning the entire document for each pre-processor
+    # directive, we should scan the source line by line and handle
+    # each directive as it occurs.
+    # * The current implementation has a lot of duplicated code.
+    # * Instead of modifying the source in-place, we should copy to a
+    # result list. This will make good error reporting much easier.
+    # * Branch leaves are only looked up in the file with the %BRANCH
+    # directive, not in all included files as expected.
+
+ def __init__( # pylint: disable=too-many-arguments
+ self,
+ template,
+ ontodoc,
+ basedir=".",
+ figdir="genfigs",
+ figformat="png",
+ figscale=1.0,
+ maxwidth=None,
+ imported=False,
+ ):
+ self.lines = template.split("\n")
+ self.ontodoc = ontodoc
+ self.basedir = basedir
+ self.figdir = os.path.join(basedir, figdir)
+ self.figformat = figformat
+ self.figscale = figscale
+ self.maxwidth = maxwidth
+ self.imported = imported
+ self._branch_cache = None
+ self._processed = False # Whether process() has been called
+
+ def __str__(self):
+ return self.get_buffer()
+
+ def get_buffer(self):
+ """Returns the current buffer."""
+ return "\n".join(self.lines)
+
+ def copy(self):
+ """Returns a copy of self."""
+ docpp = DocPP(
+ "",
+ self.ontodoc,
+ self.basedir,
+ figformat=self.figformat,
+ figscale=self.figscale,
+ maxwidth=self.maxwidth,
+ )
+ docpp.lines[:] = self.lines
+ docpp.figdir = self.figdir
+ return docpp
+
+ def get_branches(self):
+ """Returns a list with all branch names as specified with %BRANCH
+ (in current and all included documents). The returned value is
+ cached for efficiency purposes and so that it is not lost after
+ processing branches."""
+ if self._branch_cache is None:
+ names = []
+ docpp = self.copy()
+ docpp.process_includes()
+ for line in docpp.lines:
+ if line.startswith("%BRANCH"):
+ names.append(shlex.split(line)[1])
+ self._branch_cache = names
+ return self._branch_cache
+
+ def shift_header_levels(self, shift):
+ """Shift header level of all hashtag-headers in buffer. Underline
+ headers are ignored."""
+ if not shift:
+ return
+ pat = re.compile("^#+ ")
+ for i, line in enumerate(self.lines):
+ match = pat.match(line)
+ if match:
+ if shift > 0:
+ self.lines[i] = "#" * shift + line
+ elif shift < 0:
+ counter = match.end()
+ if shift > counter:
+ self.lines[i] = line.lstrip("# ")
+ else:
+ self.lines[i] = line[counter:]
+
+ def process_comments(self):
+ """Strips out comment lines starting with "%%"."""
+ self.lines = [line for line in self.lines if not line.startswith("%%")]
+
+ def process_headers(self):
+ """Expand all %HEADER specifications."""
+ for i, line in reversed(list(enumerate(self.lines))):
+ if line.startswith("%HEADER "):
+ tokens = shlex.split(line)
+ name = tokens[1]
+ opts = get_options(tokens[2:], level=1)
+ del self.lines[i]
+ self.lines[i:i] = self.ontodoc.get_header(
+ name, int(opts.level) # pylint: disable=no-member
+ ).split("\n")
+
+ def process_figures(self):
+ """Expand all %FIGURE specifications."""
+ for i, line in reversed(list(enumerate(self.lines))):
+ if line.startswith("%FIGURE "):
+ tokens = shlex.split(line)
+ path = tokens[1]
+ opts = get_options(tokens[2:], caption="", width=0)
+ del self.lines[i]
+ self.lines[i:i] = self.ontodoc.get_figure(
+ os.path.join(self.basedir, path),
+ caption=opts.caption, # pylint: disable=no-member
+ width=opts.width, # pylint: disable=no-member
+ ).split("\n")
+
+ def process_entities(self):
+ """Expand all %ENTITY specifications."""
+ for i, line in reversed(list(enumerate(self.lines))):
+ if line.startswith("%ENTITY "):
+ tokens = shlex.split(line)
+ name = tokens[1]
+ opts = get_options(tokens[2:], header_level=3)
+ del self.lines[i]
+ self.lines[i:i] = self.ontodoc.itemdoc(
+ name, int(opts.header_level) # pylint: disable=no-member
+ ).split("\n")
+
+ def process_branches(self):
+ """Expand all %BRANCH specifications."""
+ onto = self.ontodoc.onto
+
+ # Get all branch names in final document
+ names = self.get_branches()
+ for i, line in reversed(list(enumerate(self.lines))):
+ if line.startswith("%BRANCH "):
+ tokens = shlex.split(line)
+ name = tokens[1]
+ opts = get_options(
+ tokens[2:],
+ header_level=3,
+ terminated=1,
+ include_leaves=0,
+ namespaces="",
+ ontologies="",
+ )
+ leaves = (
+ names if opts.terminated else ()
+ ) # pylint: disable=no-member
+
+ included_namespaces = (
+ opts.namespaces.split(",")
+ if opts.namespaces
+ else () # pylint: disable=no-member
+ )
+ included_ontologies = (
+ opts.ontologies.split(",")
+ if opts.ontologies
+ else () # pylint: disable=no-member
+ )
+
+ branch = filter_classes(
+ onto.get_branch(
+ name, leaves, opts.include_leaves
+ ), # pylint: disable=no-member
+ included_namespaces=included_namespaces,
+ included_ontologies=included_ontologies,
+ )
+
+ del self.lines[i]
+ self.lines[i:i] = self.ontodoc.itemsdoc(
+ branch, int(opts.header_level) # pylint: disable=no-member
+ ).split("\n")
+
+ def _make_branchfig( # pylint: disable=too-many-arguments,too-many-locals
+ self,
+ name: str,
+ path: "Union[Path, str]",
+ terminated: bool,
+ include_leaves: bool,
+ strict_leaves: bool,
+ width: float,
+ leaves: "Union[str, list[str]]",
+ relations: str,
+ edgelabels: str,
+ rankdir: str,
+ legend: bool,
+ included_namespaces: "Iterable[str]",
+ included_ontologies: "Iterable[str]",
+ ) -> "tuple[str, list[str], float]":
+ """Help method for process_branchfig().
+
+ Args:
+ name: name of branch root
+ path: optional figure path name
+ include_leaves: whether to include leaves as end points
+ to the branch.
+ strict_leaves: whether to strictly exclude leave descendants
+ terminated: whether the graph should be terminated at leaf nodes
+ width: optional figure width
+ leaves: optional leaf node names for graph termination
+ relations: comma-separated list of relations to include
+ edgelabels: whether to include edgelabels
+ rankdir: graph direction (BT, TB, RL, LR)
+ legend: whether to add legend
+ included_namespaces: sequence of names of namespaces to be included
+ included_ontologies: sequence of names of ontologies to be included
+
+ Returns:
+ filepath: path to generated figure
+ leaves: used list of leaf node names
+ width: actual figure width
+
+ """
+ onto = self.ontodoc.onto
+ if leaves:
+ if isinstance(leaves, str):
+ leaves = leaves.split(",")
+ elif terminated:
+ leaves = set(self.get_branches())
+ leaves.discard(name)
+ else:
+ leaves = None
+ if path:
+ figdir = os.path.dirname(path)
+ formatext = os.path.splitext(path)[1]
+ if formatext:
+ fmt = formatext.lstrip(".")
+ else:
+ fmt = self.figformat
+ path += f".{fmt}"
+ else:
+ figdir = self.figdir
+ fmt = self.figformat
+ term = "T" if terminated else ""
+ path = os.path.join(figdir, name + term) + f".{fmt}"
+
+ # Create graph
+ graph = OntoGraph(onto, graph_attr={"rankdir": rankdir})
+ graph.add_branch(
+ root=name,
+ leaves=leaves,
+ include_leaves=include_leaves,
+ strict_leaves=strict_leaves,
+ relations=relations,
+ edgelabels=edgelabels,
+ included_namespaces=included_namespaces,
+ included_ontologies=included_ontologies,
+ )
+ if legend:
+ graph.add_legend()
+
+ if not width:
+ figwidth, _ = graph.get_figsize()
+ width = self.figscale * figwidth
+ if self.maxwidth and width > self.maxwidth:
+ width = self.maxwidth
+
+ filepath = os.path.join(self.basedir, path)
+ destdir = os.path.dirname(filepath)
+ if not os.path.exists(destdir):
+ os.makedirs(destdir)
+ graph.save(filepath, fmt=fmt)
+ return filepath, leaves, width
+
+ def process_branchfigs(self):
+ """Process all %BRANCHFIG directives."""
+ for i, line in reversed(list(enumerate(self.lines))):
+ if line.startswith("%BRANCHFIG "):
+ tokens = shlex.split(line)
+ name = tokens[1]
+ opts = get_options(
+ tokens[2:],
+ path="",
+ caption="",
+ terminated=1,
+ include_leaves=1,
+ strict_leaves=1,
+ width=0,
+ leaves="",
+ relations="all",
+ edgelabels=0,
+ rankdir="BT",
+ legend=1,
+ namespaces="",
+ ontologies="",
+ )
+
+ included_namespaces = (
+ opts.namespaces.split(",")
+ if opts.namespaces
+ else () # pylint: disable=no-member
+ )
+ included_ontologies = (
+ opts.ontologies.split(",")
+ if opts.ontologies
+ else () # pylint: disable=no-member
+ )
+
+ filepath, _, width = self._make_branchfig(
+ name,
+ opts.path, # pylint: disable=no-member
+ opts.terminated, # pylint: disable=no-member
+ opts.include_leaves, # pylint: disable=no-member
+ opts.strict_leaves, # pylint: disable=no-member
+ opts.width, # pylint: disable=no-member
+ opts.leaves, # pylint: disable=no-member
+ opts.relations, # pylint: disable=no-member
+ opts.edgelabels, # pylint: disable=no-member
+ opts.rankdir, # pylint: disable=no-member
+ opts.legend, # pylint: disable=no-member
+ included_namespaces,
+ included_ontologies,
+ )
+
+ del self.lines[i]
+ self.lines[i:i] = self.ontodoc.get_figure(
+ filepath,
+ caption=opts.caption,
+ width=width, # pylint: disable=no-member
+ ).split("\n")
+
+ def process_branchdocs(self): # pylint: disable=too-many-locals
+ """Process all %BRANCHDOC and %BRANCHEAD directives."""
+ onto = self.ontodoc.onto
+ for i, line in reversed(list(enumerate(self.lines))):
+ if line.startswith("%BRANCHDOC ") or line.startswith(
+ "%BRANCHHEAD "
+ ):
+ with_branch = bool(line.startswith("%BRANCHDOC "))
+ tokens = shlex.split(line)
+ name = tokens[1]
+ title = camelsplit(name)
+ title = title[0].upper() + title[1:] + " branch"
+ opts = get_options(
+ tokens[2:],
+ level=2,
+ path="",
+ title=title,
+ caption=title + ".",
+ terminated=1,
+ strict_leaves=1,
+ width=0,
+ leaves="",
+ relations="all",
+ edgelabels=0,
+ rankdir="BT",
+ legend=1,
+ namespaces="",
+ ontologies="",
+ )
+
+ included_namespaces = (
+ opts.namespaces.split(",")
+ if opts.namespaces
+ else () # pylint: disable=no-member
+ )
+ included_ontologies = (
+ opts.ontologies.split(",")
+ if opts.ontologies
+ else () # pylint: disable=no-member
+ )
+
+ include_leaves = 1
+ filepath, leaves, width = self._make_branchfig(
+ name,
+ opts.path, # pylint: disable=no-member
+ opts.terminated, # pylint: disable=no-member
+ include_leaves,
+ opts.strict_leaves, # pylint: disable=no-member
+ opts.width, # pylint: disable=no-member
+ opts.leaves, # pylint: disable=no-member
+ opts.relations, # pylint: disable=no-member
+ opts.edgelabels, # pylint: disable=no-member
+ opts.rankdir, # pylint: disable=no-member
+ opts.legend, # pylint: disable=no-member
+ included_namespaces,
+ included_ontologies,
+ )
+
+ sec = []
+ sec.append(
+ self.ontodoc.get_header(opts.title, int(opts.level))
+ ) # pylint: disable=no-member
+ sec.append(
+ self.ontodoc.get_figure(
+ filepath,
+ caption=opts.caption,
+ width=width, # pylint: disable=no-member
+ )
+ )
+ if with_branch:
+ include_leaves = 0
+ branch = filter_classes(
+ onto.get_branch(name, leaves, include_leaves),
+ included_namespaces=included_namespaces,
+ included_ontologies=included_ontologies,
+ )
+ sec.append(
+ self.ontodoc.itemsdoc(
+ branch, int(opts.level + 1)
+ ) # pylint: disable=no-member
+ )
+
+ del self.lines[i]
+ self.lines[i:i] = sec
+
+ def process_alls(self):
+ """Expand all %ALL specifications."""
+ onto = self.ontodoc.onto
+ for i, line in reversed(list(enumerate(self.lines))):
+ if line.startswith("%ALL "):
+ tokens = shlex.split(line)
+ token = tokens[1]
+ opts = get_options(tokens[2:], header_level=3)
+ if token == "classes": # nosec
+ items = onto.classes(imported=self.imported)
+ elif token in ("object_properties", "relations"):
+ items = onto.object_properties(imported=self.imported)
+ elif token == "data_properties": # nosec
+ items = onto.data_properties(imported=self.imported)
+ elif token == "annotation_properties": # nosec
+ items = onto.annotation_properties(imported=self.imported)
+ elif token == "individuals": # nosec
+ items = onto.individuals(imported=self.imported)
+ else:
+ raise InvalidTemplateError(
+ f"Invalid argument to %%ALL: {token}"
+ )
+ items = sorted(items, key=get_label)
+ del self.lines[i]
+ self.lines[i:i] = self.ontodoc.itemsdoc(
+ items, int(opts.header_level) # pylint: disable=no-member
+ ).split("\n")
+
+ def process_allfig(self): # pylint: disable=too-many-locals
+ """Process all %ALLFIG directives."""
+ onto = self.ontodoc.onto
+ for i, line in reversed(list(enumerate(self.lines))):
+ if line.startswith("%ALLFIG "):
+ tokens = shlex.split(line)
+ token = tokens[1]
+ opts = get_options(
+ tokens[2:],
+ path="",
+ level=3,
+ terminated=0,
+ include_leaves=1,
+ strict_leaves=1,
+ width=0,
+ leaves="",
+ relations="isA",
+ edgelabels=0,
+ rankdir="BT",
+ legend=1,
+ namespaces="",
+ ontologies="",
+ )
+ if token == "classes": # nosec
+ roots = onto.get_root_classes(imported=self.imported)
+ elif token in ("object_properties", "relations"):
+ roots = onto.get_root_object_properties(
+ imported=self.imported
+ )
+ elif token == "data_properties": # nosec
+ roots = onto.get_root_data_properties(
+ imported=self.imported
+ )
+ else:
+ raise InvalidTemplateError(
+ f"Invalid argument to %%ALLFIG: {token}"
+ )
+
+ included_namespaces = (
+ opts.namespaces.split(",")
+ if opts.namespaces
+ else () # pylint: disable=no-member
+ )
+ included_ontologies = (
+ opts.ontologies.split(",")
+ if opts.ontologies
+ else () # pylint: disable=no-member
+ )
+
+ sec = []
+ for root in roots:
+ name = asstring(root, link="{label}", ontology=onto)
+ filepath, _, width = self._make_branchfig(
+ name,
+ opts.path, # pylint: disable=no-member
+ opts.terminated, # pylint: disable=no-member
+ opts.include_leaves, # pylint: disable=no-member
+ opts.strict_leaves, # pylint: disable=no-member
+ opts.width, # pylint: disable=no-member
+ opts.leaves, # pylint: disable=no-member
+ opts.relations, # pylint: disable=no-member
+ opts.edgelabels, # pylint: disable=no-member
+ opts.rankdir, # pylint: disable=no-member
+ opts.legend, # pylint: disable=no-member
+ included_namespaces,
+ included_ontologies,
+ )
+ title = f"Taxonomy of {name}."
+ sec.append(
+ self.ontodoc.get_header(title, int(opts.level))
+ ) # pylint: disable=no-member
+ sec.extend(
+ self.ontodoc.get_figure(
+ filepath, caption=title, width=width
+ ).split("\n")
+ )
+
+ del self.lines[i]
+ self.lines[i:i] = sec
+
+ def process_includes(self):
+ """Process all %INCLUDE directives."""
+ for i, line in reversed(list(enumerate(self.lines))):
+ if line.startswith("%INCLUDE "):
+ tokens = shlex.split(line)
+ filepath = tokens[1]
+ opts = get_options(tokens[2:], shift=0)
+ with open(
+ os.path.join(self.basedir, filepath), "rt", encoding="utf8"
+ ) as handle:
+ docpp = DocPP(
+ handle.read(),
+ self.ontodoc,
+ basedir=os.path.dirname(filepath),
+ figformat=self.figformat,
+ figscale=self.figscale,
+ maxwidth=self.maxwidth,
+ )
+ docpp.figdir = self.figdir
+ if opts.shift: # pylint: disable=no-member
+ docpp.shift_header_levels(
+ int(opts.shift)
+ ) # pylint: disable=no-member
+ docpp.process()
+ del self.lines[i]
+ self.lines[i:i] = docpp.lines
+
+ def process(self):
+ """Perform all pre-processing steps."""
+ if not self._processed:
+ self.process_comments()
+ self.process_headers()
+ self.process_figures()
+ self.process_entities()
+ self.process_branches()
+ self.process_branchfigs()
+ self.process_branchdocs()
+ self.process_alls()
+ self.process_allfig()
+ self.process_includes()
+ self._processed = True
+
+ def write( # pylint: disable=too-many-arguments
+ self,
+ outfile,
+ fmt=None,
+ pandoc_option_files=(),
+ pandoc_options=(),
+ genfile=None,
+ verbose=True,
+ ):
+ """Writes documentation to `outfile`.
+
+ Parameters
+ ----------
+ outfile : str
+ File that the documentation is written to.
+ fmt : str
+ Output format. If it is "md" or "simple-html",
+ the built-in template generator is used. Otherwise
+ pandoc is used. If not given, the format is inferred
+ from the `outfile` name extension.
+ pandoc_option_files : sequence
+ Sequence with command line arguments provided to pandoc.
+ pandoc_options : sequence
+ Additional pandoc options overriding options read from
+ `pandoc_option_files`.
+ genfile : str
+ Store temporary generated markdown input file to pandoc
+ to this file (for debugging).
+ verbose : bool
+ Whether to show some messages when running pandoc.
+ """
+ self.process()
+ content = self.get_buffer()
+
+ substitutions = self.ontodoc.style.get("substitutions", [])
+ for reg, sub in substitutions:
+ content = re.sub(reg, sub, content)
+
+ fmt = get_format(outfile, default="html", fmt=fmt)
+ if fmt not in ("simple-html", "markdown", "md"): # Run pandoc
+ if not genfile:
+ with NamedTemporaryFile(mode="w+t", suffix=".md") as temp_file:
+ temp_file.write(content)
+ temp_file.flush()
+ genfile = temp_file.name
+
+ run_pandoc(
+ genfile,
+ outfile,
+ fmt,
+ pandoc_option_files=pandoc_option_files,
+ pandoc_options=pandoc_options,
+ verbose=verbose,
+ )
+ else:
+ with open(genfile, "wt") as handle:
+ handle.write(content)
+
+ run_pandoc(
+ genfile,
+ outfile,
+ fmt,
+ pandoc_option_files=pandoc_option_files,
+ pandoc_options=pandoc_options,
+ verbose=verbose,
+ )
+ else:
+ if verbose:
+ print("Writing:", outfile)
+ with open(outfile, "wt") as handle:
+ handle.write(content)
+
copy(self)
+
+
+¶Returns a copy of self.
+ +ontopy/ontodoc.py
def copy(self):
+ """Returns a copy of self."""
+ docpp = DocPP(
+ "",
+ self.ontodoc,
+ self.basedir,
+ figformat=self.figformat,
+ figscale=self.figscale,
+ maxwidth=self.maxwidth,
+ )
+ docpp.lines[:] = self.lines
+ docpp.figdir = self.figdir
+ return docpp
+
get_branches(self)
+
+
+¶Returns a list with all branch names as specified with %BRANCH +(in current and all included documents). The returned value is +cached for efficiency purposes and so that it is not lost after +processing branches.
+ +ontopy/ontodoc.py
def get_branches(self):
+ """Returns a list with all branch names as specified with %BRANCH
+ (in current and all included documents). The returned value is
+ cached for efficiency purposes and so that it is not lost after
+ processing branches."""
+ if self._branch_cache is None:
+ names = []
+ docpp = self.copy()
+ docpp.process_includes()
+ for line in docpp.lines:
+ if line.startswith("%BRANCH"):
+ names.append(shlex.split(line)[1])
+ self._branch_cache = names
+ return self._branch_cache
+
get_buffer(self)
+
+
+¶Returns the current buffer.
+ +ontopy/ontodoc.py
def get_buffer(self):
+ """Returns the current buffer."""
+ return "\n".join(self.lines)
+
process(self)
+
+
+¶Perform all pre-processing steps.
+ +ontopy/ontodoc.py
def process(self):
+ """Perform all pre-processing steps."""
+ if not self._processed:
+ self.process_comments()
+ self.process_headers()
+ self.process_figures()
+ self.process_entities()
+ self.process_branches()
+ self.process_branchfigs()
+ self.process_branchdocs()
+ self.process_alls()
+ self.process_allfig()
+ self.process_includes()
+ self._processed = True
+
process_allfig(self)
+
+
+¶Process all %ALLFIG directives.
+ +ontopy/ontodoc.py
def process_allfig(self): # pylint: disable=too-many-locals
+ """Process all %ALLFIG directives."""
+ onto = self.ontodoc.onto
+ for i, line in reversed(list(enumerate(self.lines))):
+ if line.startswith("%ALLFIG "):
+ tokens = shlex.split(line)
+ token = tokens[1]
+ opts = get_options(
+ tokens[2:],
+ path="",
+ level=3,
+ terminated=0,
+ include_leaves=1,
+ strict_leaves=1,
+ width=0,
+ leaves="",
+ relations="isA",
+ edgelabels=0,
+ rankdir="BT",
+ legend=1,
+ namespaces="",
+ ontologies="",
+ )
+ if token == "classes": # nosec
+ roots = onto.get_root_classes(imported=self.imported)
+ elif token in ("object_properties", "relations"):
+ roots = onto.get_root_object_properties(
+ imported=self.imported
+ )
+ elif token == "data_properties": # nosec
+ roots = onto.get_root_data_properties(
+ imported=self.imported
+ )
+ else:
+ raise InvalidTemplateError(
+ f"Invalid argument to %%ALLFIG: {token}"
+ )
+
+ included_namespaces = (
+ opts.namespaces.split(",")
+ if opts.namespaces
+ else () # pylint: disable=no-member
+ )
+ included_ontologies = (
+ opts.ontologies.split(",")
+ if opts.ontologies
+ else () # pylint: disable=no-member
+ )
+
+ sec = []
+ for root in roots:
+ name = asstring(root, link="{label}", ontology=onto)
+ filepath, _, width = self._make_branchfig(
+ name,
+ opts.path, # pylint: disable=no-member
+ opts.terminated, # pylint: disable=no-member
+ opts.include_leaves, # pylint: disable=no-member
+ opts.strict_leaves, # pylint: disable=no-member
+ opts.width, # pylint: disable=no-member
+ opts.leaves, # pylint: disable=no-member
+ opts.relations, # pylint: disable=no-member
+ opts.edgelabels, # pylint: disable=no-member
+ opts.rankdir, # pylint: disable=no-member
+ opts.legend, # pylint: disable=no-member
+ included_namespaces,
+ included_ontologies,
+ )
+ title = f"Taxonomy of {name}."
+ sec.append(
+ self.ontodoc.get_header(title, int(opts.level))
+ ) # pylint: disable=no-member
+ sec.extend(
+ self.ontodoc.get_figure(
+ filepath, caption=title, width=width
+ ).split("\n")
+ )
+
+ del self.lines[i]
+ self.lines[i:i] = sec
+
process_alls(self)
+
+
+¶Expand all %ALL specifications.
+ +ontopy/ontodoc.py
def process_alls(self):
+ """Expand all %ALL specifications."""
+ onto = self.ontodoc.onto
+ for i, line in reversed(list(enumerate(self.lines))):
+ if line.startswith("%ALL "):
+ tokens = shlex.split(line)
+ token = tokens[1]
+ opts = get_options(tokens[2:], header_level=3)
+ if token == "classes": # nosec
+ items = onto.classes(imported=self.imported)
+ elif token in ("object_properties", "relations"):
+ items = onto.object_properties(imported=self.imported)
+ elif token == "data_properties": # nosec
+ items = onto.data_properties(imported=self.imported)
+ elif token == "annotation_properties": # nosec
+ items = onto.annotation_properties(imported=self.imported)
+ elif token == "individuals": # nosec
+ items = onto.individuals(imported=self.imported)
+ else:
+ raise InvalidTemplateError(
+ f"Invalid argument to %%ALL: {token}"
+ )
+ items = sorted(items, key=get_label)
+ del self.lines[i]
+ self.lines[i:i] = self.ontodoc.itemsdoc(
+ items, int(opts.header_level) # pylint: disable=no-member
+ ).split("\n")
+
process_branchdocs(self)
+
+
+¶Process all %BRANCHDOC and %BRANCHEAD directives.
+ +ontopy/ontodoc.py
def process_branchdocs(self): # pylint: disable=too-many-locals
+ """Process all %BRANCHDOC and %BRANCHEAD directives."""
+ onto = self.ontodoc.onto
+ for i, line in reversed(list(enumerate(self.lines))):
+ if line.startswith("%BRANCHDOC ") or line.startswith(
+ "%BRANCHHEAD "
+ ):
+ with_branch = bool(line.startswith("%BRANCHDOC "))
+ tokens = shlex.split(line)
+ name = tokens[1]
+ title = camelsplit(name)
+ title = title[0].upper() + title[1:] + " branch"
+ opts = get_options(
+ tokens[2:],
+ level=2,
+ path="",
+ title=title,
+ caption=title + ".",
+ terminated=1,
+ strict_leaves=1,
+ width=0,
+ leaves="",
+ relations="all",
+ edgelabels=0,
+ rankdir="BT",
+ legend=1,
+ namespaces="",
+ ontologies="",
+ )
+
+ included_namespaces = (
+ opts.namespaces.split(",")
+ if opts.namespaces
+ else () # pylint: disable=no-member
+ )
+ included_ontologies = (
+ opts.ontologies.split(",")
+ if opts.ontologies
+ else () # pylint: disable=no-member
+ )
+
+ include_leaves = 1
+ filepath, leaves, width = self._make_branchfig(
+ name,
+ opts.path, # pylint: disable=no-member
+ opts.terminated, # pylint: disable=no-member
+ include_leaves,
+ opts.strict_leaves, # pylint: disable=no-member
+ opts.width, # pylint: disable=no-member
+ opts.leaves, # pylint: disable=no-member
+ opts.relations, # pylint: disable=no-member
+ opts.edgelabels, # pylint: disable=no-member
+ opts.rankdir, # pylint: disable=no-member
+ opts.legend, # pylint: disable=no-member
+ included_namespaces,
+ included_ontologies,
+ )
+
+ sec = []
+ sec.append(
+ self.ontodoc.get_header(opts.title, int(opts.level))
+ ) # pylint: disable=no-member
+ sec.append(
+ self.ontodoc.get_figure(
+ filepath,
+ caption=opts.caption,
+ width=width, # pylint: disable=no-member
+ )
+ )
+ if with_branch:
+ include_leaves = 0
+ branch = filter_classes(
+ onto.get_branch(name, leaves, include_leaves),
+ included_namespaces=included_namespaces,
+ included_ontologies=included_ontologies,
+ )
+ sec.append(
+ self.ontodoc.itemsdoc(
+ branch, int(opts.level + 1)
+ ) # pylint: disable=no-member
+ )
+
+ del self.lines[i]
+ self.lines[i:i] = sec
+
process_branches(self)
+
+
+¶Expand all %BRANCH specifications.
+ +ontopy/ontodoc.py
def process_branches(self):
+ """Expand all %BRANCH specifications."""
+ onto = self.ontodoc.onto
+
+ # Get all branch names in final document
+ names = self.get_branches()
+ for i, line in reversed(list(enumerate(self.lines))):
+ if line.startswith("%BRANCH "):
+ tokens = shlex.split(line)
+ name = tokens[1]
+ opts = get_options(
+ tokens[2:],
+ header_level=3,
+ terminated=1,
+ include_leaves=0,
+ namespaces="",
+ ontologies="",
+ )
+ leaves = (
+ names if opts.terminated else ()
+ ) # pylint: disable=no-member
+
+ included_namespaces = (
+ opts.namespaces.split(",")
+ if opts.namespaces
+ else () # pylint: disable=no-member
+ )
+ included_ontologies = (
+ opts.ontologies.split(",")
+ if opts.ontologies
+ else () # pylint: disable=no-member
+ )
+
+ branch = filter_classes(
+ onto.get_branch(
+ name, leaves, opts.include_leaves
+ ), # pylint: disable=no-member
+ included_namespaces=included_namespaces,
+ included_ontologies=included_ontologies,
+ )
+
+ del self.lines[i]
+ self.lines[i:i] = self.ontodoc.itemsdoc(
+ branch, int(opts.header_level) # pylint: disable=no-member
+ ).split("\n")
+
process_branchfigs(self)
+
+
+¶Process all %BRANCHFIG directives.
+ +ontopy/ontodoc.py
def process_branchfigs(self):
+ """Process all %BRANCHFIG directives."""
+ for i, line in reversed(list(enumerate(self.lines))):
+ if line.startswith("%BRANCHFIG "):
+ tokens = shlex.split(line)
+ name = tokens[1]
+ opts = get_options(
+ tokens[2:],
+ path="",
+ caption="",
+ terminated=1,
+ include_leaves=1,
+ strict_leaves=1,
+ width=0,
+ leaves="",
+ relations="all",
+ edgelabels=0,
+ rankdir="BT",
+ legend=1,
+ namespaces="",
+ ontologies="",
+ )
+
+ included_namespaces = (
+ opts.namespaces.split(",")
+ if opts.namespaces
+ else () # pylint: disable=no-member
+ )
+ included_ontologies = (
+ opts.ontologies.split(",")
+ if opts.ontologies
+ else () # pylint: disable=no-member
+ )
+
+ filepath, _, width = self._make_branchfig(
+ name,
+ opts.path, # pylint: disable=no-member
+ opts.terminated, # pylint: disable=no-member
+ opts.include_leaves, # pylint: disable=no-member
+ opts.strict_leaves, # pylint: disable=no-member
+ opts.width, # pylint: disable=no-member
+ opts.leaves, # pylint: disable=no-member
+ opts.relations, # pylint: disable=no-member
+ opts.edgelabels, # pylint: disable=no-member
+ opts.rankdir, # pylint: disable=no-member
+ opts.legend, # pylint: disable=no-member
+ included_namespaces,
+ included_ontologies,
+ )
+
+ del self.lines[i]
+ self.lines[i:i] = self.ontodoc.get_figure(
+ filepath,
+ caption=opts.caption,
+ width=width, # pylint: disable=no-member
+ ).split("\n")
+
process_comments(self)
+
+
+¶Strips out comment lines starting with "%%".
+ +ontopy/ontodoc.py
def process_comments(self):
+ """Strips out comment lines starting with "%%"."""
+ self.lines = [line for line in self.lines if not line.startswith("%%")]
+
process_entities(self)
+
+
+¶Expand all %ENTITY specifications.
+ +ontopy/ontodoc.py
def process_entities(self):
+ """Expand all %ENTITY specifications."""
+ for i, line in reversed(list(enumerate(self.lines))):
+ if line.startswith("%ENTITY "):
+ tokens = shlex.split(line)
+ name = tokens[1]
+ opts = get_options(tokens[2:], header_level=3)
+ del self.lines[i]
+ self.lines[i:i] = self.ontodoc.itemdoc(
+ name, int(opts.header_level) # pylint: disable=no-member
+ ).split("\n")
+
process_figures(self)
+
+
+¶Expand all %FIGURE specifications.
+ +ontopy/ontodoc.py
def process_figures(self):
+ """Expand all %FIGURE specifications."""
+ for i, line in reversed(list(enumerate(self.lines))):
+ if line.startswith("%FIGURE "):
+ tokens = shlex.split(line)
+ path = tokens[1]
+ opts = get_options(tokens[2:], caption="", width=0)
+ del self.lines[i]
+ self.lines[i:i] = self.ontodoc.get_figure(
+ os.path.join(self.basedir, path),
+ caption=opts.caption, # pylint: disable=no-member
+ width=opts.width, # pylint: disable=no-member
+ ).split("\n")
+
process_headers(self)
+
+
+¶Expand all %HEADER specifications.
+ +ontopy/ontodoc.py
def process_headers(self):
+ """Expand all %HEADER specifications."""
+ for i, line in reversed(list(enumerate(self.lines))):
+ if line.startswith("%HEADER "):
+ tokens = shlex.split(line)
+ name = tokens[1]
+ opts = get_options(tokens[2:], level=1)
+ del self.lines[i]
+ self.lines[i:i] = self.ontodoc.get_header(
+ name, int(opts.level) # pylint: disable=no-member
+ ).split("\n")
+
process_includes(self)
+
+
+¶Process all %INCLUDE directives.
+ +ontopy/ontodoc.py
def process_includes(self):
+ """Process all %INCLUDE directives."""
+ for i, line in reversed(list(enumerate(self.lines))):
+ if line.startswith("%INCLUDE "):
+ tokens = shlex.split(line)
+ filepath = tokens[1]
+ opts = get_options(tokens[2:], shift=0)
+ with open(
+ os.path.join(self.basedir, filepath), "rt", encoding="utf8"
+ ) as handle:
+ docpp = DocPP(
+ handle.read(),
+ self.ontodoc,
+ basedir=os.path.dirname(filepath),
+ figformat=self.figformat,
+ figscale=self.figscale,
+ maxwidth=self.maxwidth,
+ )
+ docpp.figdir = self.figdir
+ if opts.shift: # pylint: disable=no-member
+ docpp.shift_header_levels(
+ int(opts.shift)
+ ) # pylint: disable=no-member
+ docpp.process()
+ del self.lines[i]
+ self.lines[i:i] = docpp.lines
+
shift_header_levels(self, shift)
+
+
+¶Shift header level of all hashtag-headers in buffer. Underline +headers are ignored.
+ +ontopy/ontodoc.py
def shift_header_levels(self, shift):
+ """Shift header level of all hashtag-headers in buffer. Underline
+ headers are ignored."""
+ if not shift:
+ return
+ pat = re.compile("^#+ ")
+ for i, line in enumerate(self.lines):
+ match = pat.match(line)
+ if match:
+ if shift > 0:
+ self.lines[i] = "#" * shift + line
+ elif shift < 0:
+ counter = match.end()
+ if shift > counter:
+ self.lines[i] = line.lstrip("# ")
+ else:
+ self.lines[i] = line[counter:]
+
write(self, outfile, fmt=None, pandoc_option_files=(), pandoc_options=(), genfile=None, verbose=True)
+
+
+¶Writes documentation to outfile
.
outfile : str
+ File that the documentation is written to.
+fmt : str
+ Output format. If it is "md" or "simple-html",
+ the built-in template generator is used. Otherwise
+ pandoc is used. If not given, the format is inferred
+ from the outfile
name extension.
+pandoc_option_files : sequence
+ Sequence with command line arguments provided to pandoc.
+pandoc_options : sequence
+ Additional pandoc options overriding options read from
+pandoc_option_files
.
+genfile : str
+ Store temporary generated markdown input file to pandoc
+ to this file (for debugging).
+verbose : bool
+ Whether to show some messages when running pandoc.
ontopy/ontodoc.py
def write( # pylint: disable=too-many-arguments
+ self,
+ outfile,
+ fmt=None,
+ pandoc_option_files=(),
+ pandoc_options=(),
+ genfile=None,
+ verbose=True,
+):
+ """Writes documentation to `outfile`.
+
+ Parameters
+ ----------
+ outfile : str
+ File that the documentation is written to.
+ fmt : str
+ Output format. If it is "md" or "simple-html",
+ the built-in template generator is used. Otherwise
+ pandoc is used. If not given, the format is inferred
+ from the `outfile` name extension.
+ pandoc_option_files : sequence
+ Sequence with command line arguments provided to pandoc.
+ pandoc_options : sequence
+ Additional pandoc options overriding options read from
+ `pandoc_option_files`.
+ genfile : str
+ Store temporary generated markdown input file to pandoc
+ to this file (for debugging).
+ verbose : bool
+ Whether to show some messages when running pandoc.
+ """
+ self.process()
+ content = self.get_buffer()
+
+ substitutions = self.ontodoc.style.get("substitutions", [])
+ for reg, sub in substitutions:
+ content = re.sub(reg, sub, content)
+
+ fmt = get_format(outfile, default="html", fmt=fmt)
+ if fmt not in ("simple-html", "markdown", "md"): # Run pandoc
+ if not genfile:
+ with NamedTemporaryFile(mode="w+t", suffix=".md") as temp_file:
+ temp_file.write(content)
+ temp_file.flush()
+ genfile = temp_file.name
+
+ run_pandoc(
+ genfile,
+ outfile,
+ fmt,
+ pandoc_option_files=pandoc_option_files,
+ pandoc_options=pandoc_options,
+ verbose=verbose,
+ )
+ else:
+ with open(genfile, "wt") as handle:
+ handle.write(content)
+
+ run_pandoc(
+ genfile,
+ outfile,
+ fmt,
+ pandoc_option_files=pandoc_option_files,
+ pandoc_options=pandoc_options,
+ verbose=verbose,
+ )
+ else:
+ if verbose:
+ print("Writing:", outfile)
+ with open(outfile, "wt") as handle:
+ handle.write(content)
+
+InvalidTemplateError (NameError)
+
+
+
+
+¶Raised on errors in template files.
+ +ontopy/ontodoc.py
class InvalidTemplateError(NameError):
+ """Raised on errors in template files."""
+
+OntoDoc
+
+
+
+¶A class for helping documentating ontologies.
+onto : Ontology instance + The ontology that should be documented. +style : dict | "html" | "markdown" | "markdown_tex" + A dict defining the following template strings (and substitutions):
+:header: Formats an header.
+ Substitutions: {level}, {label}
+:link: Formats a link.
+ Substitutions: {name}
+:point: Formats a point (list item).
+ Substitutions: {point}, {ontology}
+:points: Formats a list of points. Used within annotations.
+ Substitutions: {points}, {ontology}
+:annotation: Formats an annotation.
+ Substitutions: {key}, {value}, {ontology}
+:substitutions: list of ``(regex, sub)`` pairs for substituting
+ annotation values.
+
ontopy/ontodoc.py
class OntoDoc:
+ """A class for helping documentating ontologies.
+
+ Parameters
+ ----------
+ onto : Ontology instance
+ The ontology that should be documented.
+ style : dict | "html" | "markdown" | "markdown_tex"
+ A dict defining the following template strings (and substitutions):
+
+ :header: Formats an header.
+ Substitutions: {level}, {label}
+ :link: Formats a link.
+ Substitutions: {name}
+ :point: Formats a point (list item).
+ Substitutions: {point}, {ontology}
+ :points: Formats a list of points. Used within annotations.
+ Substitutions: {points}, {ontology}
+ :annotation: Formats an annotation.
+ Substitutions: {key}, {value}, {ontology}
+ :substitutions: list of ``(regex, sub)`` pairs for substituting
+ annotation values.
+ """
+
+ _markdown_style = {
+ "sep": "\n",
+ "figwidth": "{{ width={width:.0f}px }}",
+ "figure": "![{caption}]({path}){figwidth}\n",
+ "header": "\n{:#<{level}} {label} {{#{anchor}}}",
+ # Use ref instead of iri for local references in links
+ "link": "[{label}]({ref})",
+ "point": " - {point}\n",
+ "points": "\n\n{points}\n",
+ "annotation": "**{key}:** {value}\n",
+ "substitutions": [],
+ }
+ # Extra style settings for markdown+tex (e.g. pdf generation with pandoc)
+ _markdown_tex_extra_style = {
+ "substitutions": [
+ # logic/math symbols
+ ("\u2200", r"$\\forall$"),
+ ("\u2203", r"$\\exists$"),
+ ("\u2206", r"$\\nabla$"),
+ ("\u2227", r"$\\land$"),
+ ("\u2228", r"$\\lor$"),
+ ("\u2207", r"$\\nabla$"),
+ ("\u2212", r"-"),
+ ("->", r"$\\rightarrow$"),
+ # uppercase greek letters
+ ("\u0391", r"$\\Upalpha$"),
+ ("\u0392", r"$\\Upbeta$"),
+ ("\u0393", r"$\\Upgamma$"),
+ ("\u0394", r"$\\Updelta$"),
+ ("\u0395", r"$\\Upepsilon$"),
+ ("\u0396", r"$\\Upzeta$"),
+ ("\u0397", r"$\\Upeta$"),
+ ("\u0398", r"$\\Uptheta$"),
+ ("\u0399", r"$\\Upiota$"),
+ ("\u039a", r"$\\Upkappa$"),
+ ("\u039b", r"$\\Uplambda$"),
+ ("\u039c", r"$\\Upmu$"),
+ ("\u039d", r"$\\Upnu$"),
+ ("\u039e", r"$\\Upxi$"),
+ ("\u039f", r"$\\Upomekron$"),
+ ("\u03a0", r"$\\Uppi$"),
+ ("\u03a1", r"$\\Uprho$"),
+ ("\u03a3", r"$\\Upsigma$"), # no \u0302
+ ("\u03a4", r"$\\Uptau$"),
+ ("\u03a5", r"$\\Upupsilon$"),
+ ("\u03a6", r"$\\Upvarphi$"),
+ ("\u03a7", r"$\\Upchi$"),
+ ("\u03a8", r"$\\Uppsi$"),
+ ("\u03a9", r"$\\Upomega$"),
+ # lowercase greek letters
+ ("\u03b1", r"$\\upalpha$"),
+ ("\u03b2", r"$\\upbeta$"),
+ ("\u03b3", r"$\\upgamma$"),
+ ("\u03b4", r"$\\updelta$"),
+ ("\u03b5", r"$\\upepsilon$"),
+ ("\u03b6", r"$\\upzeta$"),
+ ("\u03b7", r"$\\upeta$"),
+ ("\u03b8", r"$\\uptheta$"),
+ ("\u03b9", r"$\\upiota$"),
+ ("\u03ba", r"$\\upkappa$"),
+ ("\u03bb", r"$\\uplambda$"),
+ ("\u03bc", r"$\\upmu$"),
+ ("\u03bd", r"$\\upnu$"),
+ ("\u03be", r"$\\upxi$"),
+ ("\u03bf", r"o"), # no \upomicron
+ ("\u03c0", r"$\\uppi$"),
+ ("\u03c1", r"$\\uprho$"),
+ ("\u03c2", r"$\\upvarsigma$"),
+ ("\u03c3", r"$\\upsigma$"),
+ ("\u03c4", r"$\\uptau$"),
+ ("\u03c5", r"$\\upupsilon$"),
+ ("\u03c6", r"$\\upvarphi$"),
+ ("\u03c7", r"$\\upchi$"),
+ ("\u03c8", r"$\\uppsi$"),
+ ("\u03c9", r"$\\upomega$"),
+ # acutes, accents, etc...
+ ("\u03ae", r"$\\acute{\\upeta}$"),
+ ("\u1e17", r"$\\acute{\\bar{\\mathrm{e}}}$"),
+ ("\u03ac", r"$\\acute{\\upalpha}$"),
+ ("\u00e1", r"$\\acute{\\mathrm{a}}$"),
+ ("\u03cc", r"$\\acute{o}$"), # no \upomicron
+ ("\u014d", r"$\\bar{\\mathrm{o}}$"),
+ ("\u1f45", r"$\\acute{o}$"), # no \omicron
+ ],
+ }
+ _html_style = {
+ "sep": "<p>\n",
+ "figwidth": 'width="{width:.0f}"',
+ "figure": '<img src="{path}" alt="{caption}"{figwidth}>',
+ "header": '<h{level} id="{anchor}">{label}</h{level}>',
+ "link": '<a href="{ref}">{label}</a>',
+ "point": " <li>{point}</li>\n",
+ "points": " <ul>\n {points}\n </ul>\n",
+ "annotation": " <dd><strong>{key}:</strong>\n{value} </dd>\n",
+ "substitutions": [
+ (r"&", r"‒"),
+ (r"<p>", r"<p>\n\n"),
+ (r"\u2018([^\u2019]*)\u2019", r"<q>\1</q>"),
+ (r"\u2019", r"'"),
+ (r"\u2260", r"≠"),
+ (r"\u2264", r"≤"),
+ (r"\u2265", r"≥"),
+ (r"\u226A", r"&x226A;"),
+ (r"\u226B", r"&x226B;"),
+ (r'"Y$', r""), # strange noice added by owlready2
+ ],
+ }
+
+ def __init__(self, onto, style="markdown"):
+ if isinstance(style, str):
+ if style == "markdown_tex":
+ style = self._markdown_style.copy()
+ style.update(self._markdown_tex_extra_style)
+ else:
+ style = getattr(self, f"_{style}_style")
+ self.onto = onto
+ self.style = style
+ self.url_regex = re.compile(r"https?:\/\/[^\s ]+")
+
+ def get_default_template(self):
+ """Returns default template."""
+ title = os.path.splitext(
+ os.path.basename(self.onto.base_iri.rstrip("/#"))
+ )[0]
+ irilink = self.style.get("link", "{name}").format(
+ iri=self.onto.base_iri,
+ name=self.onto.base_iri,
+ ref=self.onto.base_iri,
+ label=self.onto.base_iri,
+ lowerlabel=self.onto.base_iri,
+ )
+ template = dedent(
+ """\
+ %HEADER {title}
+ Documentation of {irilink}
+
+ %HEADER Relations level=2
+ %ALL object_properties
+
+ %HEADER Classes level=2
+ %ALL classes
+
+ %HEADER Individuals level=2
+ %ALL individuals
+
+ %HEADER Appendix level=1
+ %HEADER "Relation taxonomies" level=2
+ %ALLFIG object_properties
+
+ %HEADER "Class taxonomies" level=2
+ %ALLFIG classes
+ """
+ ).format(ontology=self.onto, title=title, irilink=irilink)
+ return template
+
+ def get_header(self, label, header_level=1, anchor=None):
+ """Returns `label` formatted as a header of given level."""
+ header_style = self.style.get("header", "{label}\n")
+ return header_style.format(
+ "",
+ level=header_level,
+ label=label,
+ anchor=anchor if anchor else label.lower().replace(" ", "-"),
+ )
+
+ def get_figure(self, path, caption="", width=None):
+ """Returns a formatted insert-figure-directive."""
+ figwidth_style = self.style.get("figwidth", "")
+ figure_style = self.style.get("figure", "")
+ figwidth = figwidth_style.format(width=width) if width else ""
+ return figure_style.format(
+ path=path, caption=caption, figwidth=figwidth
+ )
+
+ def itemdoc(
+ self, item, header_level=3, show_disjoints=False
+ ): # pylint: disable=too-many-locals,too-many-branches,too-many-statements
+ """Returns documentation of `item`.
+
+ Parameters
+ ----------
+ item : obj | label
+ The class, individual or relation to document.
+ header_level : int
+ Header level. Defaults to 3.
+ show_disjoints : Bool
+ Whether to show `disjoint_with` relations.
+ """
+ onto = self.onto
+ if isinstance(item, str):
+ item = self.onto.get_by_label(item)
+
+ header_style = self.style.get("header", "{label}\n")
+ link_style = self.style.get("link", "{name}")
+ point_style = self.style.get("point", "{point}")
+ points_style = self.style.get("points", "{points}")
+ annotation_style = self.style.get("annotation", "{key}: {value}\n")
+ substitutions = self.style.get("substitutions", [])
+
+ # Logical "sorting" of annotations
+ order = {
+ "definition": "00",
+ "axiom": "01",
+ "theorem": "02",
+ "elucidation": "03",
+ "domain": "04",
+ "range": "05",
+ "example": "06",
+ }
+
+ doc = []
+
+ # Header
+ label = get_label(item)
+ iriname = item.iri.partition("#")[2]
+ anchor = iriname if iriname else label.lower()
+ doc.append(
+ header_style.format(
+ "",
+ level=header_level,
+ label=label,
+ anchor=anchor,
+ )
+ )
+
+ # Add warning about missing prefLabel
+ if not hasattr(item, "prefLabel") or not item.prefLabel.first():
+ doc.append(
+ annotation_style.format(
+ key="Warning", value="Missing prefLabel"
+ )
+ )
+
+ # Add iri
+ doc.append(
+ annotation_style.format(
+ key="IRI",
+ value=asstring(item.iri, link_style, ontology=onto),
+ ontology=onto,
+ )
+ )
+
+ # Add annotations
+ if isinstance(item, owlready2.Thing):
+ annotations = item.get_individual_annotations()
+ else:
+ annotations = item.get_annotations()
+
+ for key in sorted(
+ annotations.keys(), key=lambda key: order.get(key, key)
+ ):
+ for value in annotations[key]:
+ value = str(value)
+ if self.url_regex.match(value):
+ doc.append(
+ annotation_style.format(
+ key=key,
+ value=asstring(value, link_style, ontology=onto),
+ )
+ )
+ else:
+ for reg, sub in substitutions:
+ value = re.sub(reg, sub, value)
+ doc.append(annotation_style.format(key=key, value=value))
+
+ # ...add relations from is_a
+ points = []
+ non_prop = (
+ owlready2.ThingClass, # owlready2.Restriction,
+ owlready2.And,
+ owlready2.Or,
+ owlready2.Not,
+ )
+ for prop in item.is_a:
+ if isinstance(prop, non_prop) or (
+ isinstance(item, owlready2.PropertyClass)
+ and isinstance(prop, owlready2.PropertyClass)
+ ):
+ points.append(
+ point_style.format(
+ point="is_a "
+ + asstring(prop, link_style, ontology=onto),
+ ontology=onto,
+ )
+ )
+ else:
+ points.append(
+ point_style.format(
+ point=asstring(prop, link_style, ontology=onto),
+ ontology=onto,
+ )
+ )
+
+ # ...add equivalent_to relations
+ for entity in item.equivalent_to:
+ points.append(
+ point_style.format(
+ point="equivalent_to "
+ + asstring(entity, link_style, ontology=onto)
+ )
+ )
+
+ # ...add disjoint_with relations
+ if show_disjoints and hasattr(item, "disjoint_with"):
+ subjects = set(item.disjoint_with(reduce=True))
+ points.append(
+ point_style.format(
+ point="disjoint_with "
+ + ", ".join(
+ asstring(s, link_style, ontology=onto) for s in subjects
+ ),
+ ontology=onto,
+ )
+ )
+
+ # ...add disjoint_unions
+ if hasattr(item, "disjoint_unions"):
+ for unions in item.disjoint_unions:
+ string = ", ".join(
+ asstring(u, link_style, ontology=onto) for u in unions
+ )
+ points.append(
+ point_style.format(
+ point=f"disjoint_union_of {string}", ontology=onto
+ )
+ )
+
+ # ...add inverse_of relations
+ if hasattr(item, "inverse_property") and item.inverse_property:
+ points.append(
+ point_style.format(
+ point="inverse_of "
+ + asstring(item.inverse_property, link_style, ontology=onto)
+ )
+ )
+
+ # ...add domain restrictions
+ for domain in getattr(item, "domain", ()):
+ points.append(
+ point_style.format(
+ point="domain "
+ + asstring(domain, link_style, ontology=onto)
+ )
+ )
+
+ # ...add range restrictions
+ for restriction in getattr(item, "range", ()):
+ points.append(
+ point_style.format(
+ point="range "
+ + asstring(restriction, link_style, ontology=onto)
+ )
+ )
+
+ # Add points (from is_a)
+ if points:
+ value = points_style.format(points="".join(points), ontology=onto)
+ doc.append(
+ annotation_style.format(
+ key="Subclass of", value=value, ontology=onto
+ )
+ )
+
+ # Instances (individuals)
+ if hasattr(item, "instances"):
+ points = []
+
+ for instance in item.instances():
+ if isinstance(instance.is_instance_of, property):
+ warnings.warn(
+ f'Ignoring instance "{instance}" which is both and '
+ "indivudual and class. Ontodoc does not support "
+ "punning at the present moment."
+ )
+ continue
+ if item in instance.is_instance_of:
+ points.append(
+ point_style.format(
+ point=asstring(instance, link_style, ontology=onto),
+ ontology=onto,
+ )
+ )
+ if points:
+ value = points_style.format(
+ points="".join(points), ontology=onto
+ )
+ doc.append(
+ annotation_style.format(
+ key="Individuals", value=value, ontology=onto
+ )
+ )
+
+ return "\n".join(doc)
+
+ def itemsdoc(self, items, header_level=3):
+ """Returns documentation of `items`."""
+ sep_style = self.style.get("sep", "\n")
+ doc = []
+ for item in items:
+ doc.append(self.itemdoc(item, header_level))
+ doc.append(sep_style.format(ontology=self.onto))
+ return "\n".join(doc)
+
get_default_template(self)
+
+
+¶Returns default template.
+ +ontopy/ontodoc.py
def get_default_template(self):
+ """Returns default template."""
+ title = os.path.splitext(
+ os.path.basename(self.onto.base_iri.rstrip("/#"))
+ )[0]
+ irilink = self.style.get("link", "{name}").format(
+ iri=self.onto.base_iri,
+ name=self.onto.base_iri,
+ ref=self.onto.base_iri,
+ label=self.onto.base_iri,
+ lowerlabel=self.onto.base_iri,
+ )
+ template = dedent(
+ """\
+ %HEADER {title}
+ Documentation of {irilink}
+
+ %HEADER Relations level=2
+ %ALL object_properties
+
+ %HEADER Classes level=2
+ %ALL classes
+
+ %HEADER Individuals level=2
+ %ALL individuals
+
+ %HEADER Appendix level=1
+ %HEADER "Relation taxonomies" level=2
+ %ALLFIG object_properties
+
+ %HEADER "Class taxonomies" level=2
+ %ALLFIG classes
+ """
+ ).format(ontology=self.onto, title=title, irilink=irilink)
+ return template
+
get_figure(self, path, caption='', width=None)
+
+
+¶Returns a formatted insert-figure-directive.
+ +ontopy/ontodoc.py
def get_figure(self, path, caption="", width=None):
+ """Returns a formatted insert-figure-directive."""
+ figwidth_style = self.style.get("figwidth", "")
+ figure_style = self.style.get("figure", "")
+ figwidth = figwidth_style.format(width=width) if width else ""
+ return figure_style.format(
+ path=path, caption=caption, figwidth=figwidth
+ )
+
get_header(self, label, header_level=1, anchor=None)
+
+
+¶Returns label
formatted as a header of given level.
ontopy/ontodoc.py
def get_header(self, label, header_level=1, anchor=None):
+ """Returns `label` formatted as a header of given level."""
+ header_style = self.style.get("header", "{label}\n")
+ return header_style.format(
+ "",
+ level=header_level,
+ label=label,
+ anchor=anchor if anchor else label.lower().replace(" ", "-"),
+ )
+
itemdoc(self, item, header_level=3, show_disjoints=False)
+
+
+¶Returns documentation of item
.
item : obj | label
+ The class, individual or relation to document.
+header_level : int
+ Header level. Defaults to 3.
+show_disjoints : Bool
+ Whether to show disjoint_with
relations.
ontopy/ontodoc.py
def itemdoc(
+ self, item, header_level=3, show_disjoints=False
+): # pylint: disable=too-many-locals,too-many-branches,too-many-statements
+ """Returns documentation of `item`.
+
+ Parameters
+ ----------
+ item : obj | label
+ The class, individual or relation to document.
+ header_level : int
+ Header level. Defaults to 3.
+ show_disjoints : Bool
+ Whether to show `disjoint_with` relations.
+ """
+ onto = self.onto
+ if isinstance(item, str):
+ item = self.onto.get_by_label(item)
+
+ header_style = self.style.get("header", "{label}\n")
+ link_style = self.style.get("link", "{name}")
+ point_style = self.style.get("point", "{point}")
+ points_style = self.style.get("points", "{points}")
+ annotation_style = self.style.get("annotation", "{key}: {value}\n")
+ substitutions = self.style.get("substitutions", [])
+
+ # Logical "sorting" of annotations
+ order = {
+ "definition": "00",
+ "axiom": "01",
+ "theorem": "02",
+ "elucidation": "03",
+ "domain": "04",
+ "range": "05",
+ "example": "06",
+ }
+
+ doc = []
+
+ # Header
+ label = get_label(item)
+ iriname = item.iri.partition("#")[2]
+ anchor = iriname if iriname else label.lower()
+ doc.append(
+ header_style.format(
+ "",
+ level=header_level,
+ label=label,
+ anchor=anchor,
+ )
+ )
+
+ # Add warning about missing prefLabel
+ if not hasattr(item, "prefLabel") or not item.prefLabel.first():
+ doc.append(
+ annotation_style.format(
+ key="Warning", value="Missing prefLabel"
+ )
+ )
+
+ # Add iri
+ doc.append(
+ annotation_style.format(
+ key="IRI",
+ value=asstring(item.iri, link_style, ontology=onto),
+ ontology=onto,
+ )
+ )
+
+ # Add annotations
+ if isinstance(item, owlready2.Thing):
+ annotations = item.get_individual_annotations()
+ else:
+ annotations = item.get_annotations()
+
+ for key in sorted(
+ annotations.keys(), key=lambda key: order.get(key, key)
+ ):
+ for value in annotations[key]:
+ value = str(value)
+ if self.url_regex.match(value):
+ doc.append(
+ annotation_style.format(
+ key=key,
+ value=asstring(value, link_style, ontology=onto),
+ )
+ )
+ else:
+ for reg, sub in substitutions:
+ value = re.sub(reg, sub, value)
+ doc.append(annotation_style.format(key=key, value=value))
+
+ # ...add relations from is_a
+ points = []
+ non_prop = (
+ owlready2.ThingClass, # owlready2.Restriction,
+ owlready2.And,
+ owlready2.Or,
+ owlready2.Not,
+ )
+ for prop in item.is_a:
+ if isinstance(prop, non_prop) or (
+ isinstance(item, owlready2.PropertyClass)
+ and isinstance(prop, owlready2.PropertyClass)
+ ):
+ points.append(
+ point_style.format(
+ point="is_a "
+ + asstring(prop, link_style, ontology=onto),
+ ontology=onto,
+ )
+ )
+ else:
+ points.append(
+ point_style.format(
+ point=asstring(prop, link_style, ontology=onto),
+ ontology=onto,
+ )
+ )
+
+ # ...add equivalent_to relations
+ for entity in item.equivalent_to:
+ points.append(
+ point_style.format(
+ point="equivalent_to "
+ + asstring(entity, link_style, ontology=onto)
+ )
+ )
+
+ # ...add disjoint_with relations
+ if show_disjoints and hasattr(item, "disjoint_with"):
+ subjects = set(item.disjoint_with(reduce=True))
+ points.append(
+ point_style.format(
+ point="disjoint_with "
+ + ", ".join(
+ asstring(s, link_style, ontology=onto) for s in subjects
+ ),
+ ontology=onto,
+ )
+ )
+
+ # ...add disjoint_unions
+ if hasattr(item, "disjoint_unions"):
+ for unions in item.disjoint_unions:
+ string = ", ".join(
+ asstring(u, link_style, ontology=onto) for u in unions
+ )
+ points.append(
+ point_style.format(
+ point=f"disjoint_union_of {string}", ontology=onto
+ )
+ )
+
+ # ...add inverse_of relations
+ if hasattr(item, "inverse_property") and item.inverse_property:
+ points.append(
+ point_style.format(
+ point="inverse_of "
+ + asstring(item.inverse_property, link_style, ontology=onto)
+ )
+ )
+
+ # ...add domain restrictions
+ for domain in getattr(item, "domain", ()):
+ points.append(
+ point_style.format(
+ point="domain "
+ + asstring(domain, link_style, ontology=onto)
+ )
+ )
+
+ # ...add range restrictions
+ for restriction in getattr(item, "range", ()):
+ points.append(
+ point_style.format(
+ point="range "
+ + asstring(restriction, link_style, ontology=onto)
+ )
+ )
+
+ # Add points (from is_a)
+ if points:
+ value = points_style.format(points="".join(points), ontology=onto)
+ doc.append(
+ annotation_style.format(
+ key="Subclass of", value=value, ontology=onto
+ )
+ )
+
+ # Instances (individuals)
+ if hasattr(item, "instances"):
+ points = []
+
+ for instance in item.instances():
+ if isinstance(instance.is_instance_of, property):
+ warnings.warn(
+                        f'Ignoring instance "{instance}" which is both an '
+                        "individual and class. Ontodoc does not support "
+                        "punning at the present moment."
+ )
+ continue
+ if item in instance.is_instance_of:
+ points.append(
+ point_style.format(
+ point=asstring(instance, link_style, ontology=onto),
+ ontology=onto,
+ )
+ )
+ if points:
+ value = points_style.format(
+ points="".join(points), ontology=onto
+ )
+ doc.append(
+ annotation_style.format(
+ key="Individuals", value=value, ontology=onto
+ )
+ )
+
+ return "\n".join(doc)
+
itemsdoc(self, items, header_level=3)
+
+
+¶Returns documentation of items
.
ontopy/ontodoc.py
def itemsdoc(self, items, header_level=3):
+ """Returns documentation of `items`."""
+ sep_style = self.style.get("sep", "\n")
+ doc = []
+ for item in items:
+ doc.append(self.itemdoc(item, header_level))
+ doc.append(sep_style.format(ontology=self.onto))
+ return "\n".join(doc)
+
append_pandoc_options(options, updates)
+
+
+¶Append updates
to pandoc options options
.
options : sequence
+ Sequence with initial Pandoc options.
+updates : sequence of str
+ Sequence of strings of the form "--longoption=value", where
+ longoption
is a valid pandoc long option and value
is the
+ new value. The "=value" part is optional.
Strings of the form "no-longoption" will filter out "--longoption"
+from `options`.
+
new_options : list + Updated pandoc options.
+ +ontopy/ontodoc.py
def append_pandoc_options(options, updates):
+ """Append `updates` to pandoc options `options`.
+
+ Parameters
+ ----------
+ options : sequence
+ Sequence with initial Pandoc options.
+ updates : sequence of str
+ Sequence of strings of the form "--longoption=value", where
+ ``longoption`` is a valid pandoc long option and ``value`` is the
+ new value. The "=value" part is optional.
+
+ Strings of the form "no-longoption" will filter out "--longoption"
+ from `options`.
+
+ Returns
+ -------
+ new_options : list
+ Updated pandoc options.
+ """
+ # Valid pandoc options starting with "--no-XXX"
+ no_options = set("no-highlight")
+
+ if not updates:
+ return list(options)
+
+ curated_updates = {}
+ for update in updates:
+ key, sep, value = update.partition("=")
+ curated_updates[key.lstrip("-")] = value if sep else None
+ filter_out = set(
+ _
+ for _ in curated_updates
+ if _.startswith("no-") and _ not in no_options
+ )
+ _filter_out = set(f"--{_[3:]}" for _ in filter_out)
+ new_options = [
+ opt for opt in options if opt.partition("=")[0] not in _filter_out
+ ]
+ new_options.extend(
+ [
+ f"--{key}" if value is None else f"--{key}={value}"
+ for key, value in curated_updates.items()
+ if key not in filter_out
+ ]
+ )
+ return new_options
+
get_docpp(ontodoc, infile, figdir='genfigs', figformat='png', maxwidth=None, imported=False)
+
+
+¶Read infile
and return a new docpp instance.
ontopy/ontodoc.py
def get_docpp( # pylint: disable=too-many-arguments
+ ontodoc,
+ infile,
+ figdir="genfigs",
+ figformat="png",
+ maxwidth=None,
+ imported=False,
+):
+ """Read `infile` and return a new docpp instance."""
+ if infile:
+ with open(infile, "rt") as handle:
+ template = handle.read()
+ basedir = os.path.dirname(infile)
+ else:
+ template = ontodoc.get_default_template()
+ basedir = "."
+
+ docpp = DocPP(
+ template,
+ ontodoc,
+ basedir=basedir,
+ figdir=figdir,
+ figformat=figformat,
+ maxwidth=maxwidth,
+ imported=imported,
+ )
+
+ return docpp
+
get_figformat(fmt)
+
+
+¶Infer preferred figure format from output format.
+ +ontopy/ontodoc.py
def get_figformat(fmt):
+ """Infer preferred figure format from output format."""
+ if fmt == "pdf":
+ figformat = "pdf" # XXX
+ elif "html" in fmt:
+ figformat = "svg"
+ else:
+ figformat = "png"
+ return figformat
+
get_maxwidth(fmt)
+
+
+¶Infer preferred max figure width from output format.
+ +ontopy/ontodoc.py
def get_maxwidth(fmt):
+ """Infer preferred max figure width from output format."""
+ if fmt == "pdf":
+ maxwidth = 668
+ else:
+ maxwidth = 1024
+ return maxwidth
+
get_options(opts, **kwargs)
+
+
+¶Returns a dict with options from the sequence opts
with
+"name=value" pairs. Valid option names and default values are
+provided with the keyword arguments.
ontopy/ontodoc.py
def get_options(opts, **kwargs):
+ """Returns a dict with options from the sequence `opts` with
+ "name=value" pairs. Valid option names and default values are
+ provided with the keyword arguments."""
+ res = AttributeDict(kwargs)
+ for opt in opts:
+ if "=" not in opt:
+ raise InvalidTemplateError(
+ f'Missing "=" in template option: {opt!r}'
+ )
+ name, value = opt.split("=", 1)
+ if name not in res:
+ raise InvalidTemplateError(f"Invalid template option: {name!r}")
+ res_type = type(res[name])
+ res[name] = res_type(value)
+ return res
+
get_style(fmt)
+
+
+¶Infer style from output format.
+ +ontopy/ontodoc.py
def get_style(fmt):
+ """Infer style from output format."""
+ if fmt == "simple-html":
+ style = "html"
+ elif fmt in ("tex", "latex", "pdf"):
+ style = "markdown_tex"
+ else:
+ style = "markdown"
+ return style
+
load_pandoc_option_file(yamlfile)
+
+
+¶Loads pandoc options from yamlfile
and return a list with
+corresponding pandoc command line arguments.
ontopy/ontodoc.py
def load_pandoc_option_file(yamlfile):
+ """Loads pandoc options from `yamlfile` and return a list with
+ corresponding pandoc command line arguments."""
+ with open(yamlfile) as handle:
+ pandoc_options = yaml.safe_load(handle)
+ options = pandoc_options.pop("input-files", [])
+ variables = pandoc_options.pop("variables", {})
+
+ for key, value in pandoc_options.items():
+ if isinstance(value, bool):
+ if value:
+ options.append(f"--{key}")
+ else:
+ options.append(f"--{key}={value}")
+
+ for key, value in variables.items():
+ if key == "date" and value == "now":
+ value = time.strftime("%B %d, %Y")
+ options.append(f"--variable={key}:{value}")
+
+ return options
+
run_pandoc(genfile, outfile, fmt, pandoc_option_files=(), pandoc_options=(), verbose=True)
+
+
+¶Runs pandoc.
+genfile : str
+ Name of markdown input file.
+outfile : str
+ Output file name.
+fmt : str
+ Output format.
+pandoc_option_files : sequence
+ List of files with additional pandoc options. Default is to read
+ "pandoc-options.yaml" and "pandoc-FORMAT-options.yml", where
+ FORMAT
is the output format.
+pandoc_options : sequence
+ Additional pandoc options overriding options read from
+ pandoc_option_files
.
+verbose : bool
+ Whether to print the pandoc command before execution.
subprocess.CalledProcessError
+ If the pandoc process returns with non-zero status. The returncode
+ attribute will hold the exit code.
ontopy/ontodoc.py
def run_pandoc( # pylint: disable=too-many-arguments
+ genfile,
+ outfile,
+ fmt,
+ pandoc_option_files=(),
+ pandoc_options=(),
+ verbose=True,
+):
+ """Runs pandoc.
+
+ Parameters
+ ----------
+ genfile : str
+ Name of markdown input file.
+ outfile : str
+ Output file name.
+ fmt : str
+ Output format.
+ pandoc_option_files : sequence
+ List of files with additional pandoc options. Default is to read
+ "pandoc-options.yaml" and "pandoc-FORMAT-options.yml", where
+ `FORMAT` is the output format.
+ pandoc_options : sequence
+ Additional pandoc options overriding options read from
+ `pandoc_option_files`.
+ verbose : bool
+ Whether to print the pandoc command before execution.
+
+ Raises
+ ------
+ subprocess.CalledProcessError
+ If the pandoc process returns with non-zero status. The `returncode`
+ attribute will hold the exit code.
+ """
+ # Create pandoc argument list
+ args = [genfile]
+ files = ["pandoc-options.yaml", f"pandoc-{fmt}-options.yaml"]
+ if pandoc_option_files:
+ files = pandoc_option_files
+ for fname in files:
+ if os.path.exists(fname):
+ args.extend(load_pandoc_option_file(fname))
+ else:
+ warnings.warn(f"missing pandoc option file: {fname}")
+
+ # Update pandoc argument list
+ args = append_pandoc_options(args, pandoc_options)
+
+ # pdf output requires a special attention...
+ if fmt == "pdf":
+ pdf_engine = "pdflatex"
+ for arg in args:
+ if arg.startswith("--pdf-engine"):
+ pdf_engine = arg.split("=", 1)[1]
+ break
+ with TemporaryDirectory() as tmpdir:
+ run_pandoc_pdf(tmpdir, pdf_engine, outfile, args, verbose=verbose)
+ else:
+ args.append(f"--output={outfile}")
+ cmd = ["pandoc"] + args
+ if verbose:
+ print()
+ print("* Executing command:")
+ print(" ".join(shlex.quote(_) for _ in cmd))
+ subprocess.check_call(cmd) # nosec
+
run_pandoc_pdf(latex_dir, pdf_engine, outfile, args, verbose=True)
+
+
+¶Run pandoc for pdf generation.
+ +ontopy/ontodoc.py
def run_pandoc_pdf(latex_dir, pdf_engine, outfile, args, verbose=True):
+ """Run pandoc for pdf generation."""
+ basename = os.path.join(
+ latex_dir, os.path.splitext(os.path.basename(outfile))[0]
+ )
+
+ # Run pandoc
+ texfile = basename + ".tex"
+ args.append(f"--output={texfile}")
+ cmd = ["pandoc"] + args
+ if verbose:
+ print()
+ print("* Executing commands:")
+ print(" ".join(shlex.quote(s) for s in cmd))
+ subprocess.check_call(cmd) # nosec
+
+ # Fixing tex output
+ texfile2 = basename + "2.tex"
+ with open(texfile, "rt") as handle:
+ content = handle.read().replace(r"\$\Uptheta\$", r"$\Uptheta$")
+ with open(texfile2, "wt") as handle:
+ handle.write(content)
+
+ # Run latex
+ pdffile = basename + "2.pdf"
+ cmd = [
+ pdf_engine,
+ texfile2,
+ "-halt-on-error",
+ f"-output-directory={latex_dir}",
+ ]
+ if verbose:
+ print()
+ print(" ".join(shlex.quote(s) for s in cmd))
+ output = subprocess.check_output(cmd, timeout=60) # nosec
+ output = subprocess.check_output(cmd, timeout=60) # nosec
+
+ # Workaround for non-working "-output-directory" latex option
+ if not os.path.exists(pdffile):
+ if os.path.exists(os.path.basename(pdffile)):
+ pdffile = os.path.basename(pdffile)
+ for ext in "aux", "out", "toc", "log":
+ filename = os.path.splitext(pdffile)[0] + "." + ext
+ if os.path.exists(filename):
+ os.remove(filename)
+ else:
+ print()
+ print(output)
+ print()
+ raise RuntimeError("latex did not produce pdf file: " + pdffile)
+
+ # Copy pdffile
+ if not os.path.exists(outfile) or not os.path.samefile(pdffile, outfile):
+ if verbose:
+ print()
+ print(f"move {pdffile} to {outfile}")
+ shutil.move(pdffile, outfile)
+
A module adding additional functionality to owlready2.
+If desirable some of these additions may be moved back into owlready2.
+ + + +
+BlankNode
+
+
+
+¶Represents a blank node.
+A blank node is a node that is not a literal and has no IRI. +Resources represented by blank nodes are also called anonymous resources. +Only the subject or object in an RDF triple can be a blank node.
+ +ontopy/ontology.py
class BlankNode:
+ """Represents a blank node.
+
+ A blank node is a node that is not a literal and has no IRI.
+    Resources represented by blank nodes are also called anonymous resources.
+ Only the subject or object in an RDF triple can be a blank node.
+ """
+
+ def __init__(self, onto: Union[World, Ontology], storid: int):
+ """Initiate a blank node.
+
+ Args:
+ onto: Ontology or World instance.
+ storid: The storage id of the blank node.
+ """
+ if storid >= 0:
+ raise ValueError(
+ f"A BlankNode is supposed to have a negative storid: {storid}"
+ )
+ self.onto = onto
+ self.storid = storid
+
+ def __repr__(self):
+ return repr(f"_:b{-self.storid}")
+
+ def __hash__(self):
+ return hash((self.onto, self.storid))
+
+ def __eq__(self, other):
+ """For now blank nodes always compare true against each other."""
+ return isinstance(other, BlankNode)
+
__init__(self, onto, storid)
+
+
+ special
+
+
+¶Initiate a blank node.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
onto |
+ Union[ontopy.ontology.World, ontopy.ontology.Ontology] |
+ Ontology or World instance. |
+ required | +
storid |
+ int |
+ The storage id of the blank node. |
+ required | +
ontopy/ontology.py
def __init__(self, onto: Union[World, Ontology], storid: int):
+ """Initiate a blank node.
+
+ Args:
+ onto: Ontology or World instance.
+ storid: The storage id of the blank node.
+ """
+ if storid >= 0:
+ raise ValueError(
+ f"A BlankNode is supposed to have a negative storid: {storid}"
+ )
+ self.onto = onto
+ self.storid = storid
+
+Ontology (Ontology)
+
+
+
+
+¶A generic class extending owlready2.Ontology.
+ +ontopy/ontology.py
class Ontology(owlready2.Ontology): # pylint: disable=too-many-public-methods
+ """A generic class extending owlready2.Ontology."""
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.label_annotations = DEFAULT_LABEL_ANNOTATIONS[:]
+ self.prefix = None
+
+ # Name of special unlabeled entities, like Thing, Nothing, etc...
+ _special_labels = None
+
+ # Some properties for customising dir() listing - useful in
+ # interactive sessions...
+ _dir_preflabel = isinteractive()
+ _dir_label = isinteractive()
+ _dir_name = False
+ _dir_imported = isinteractive()
+ dir_preflabel = property(
+ fget=lambda self: self._dir_preflabel,
+ fset=lambda self, v: setattr(self, "_dir_preflabel", bool(v)),
+ doc="Whether to include entity prefLabel in dir() listing.",
+ )
+ dir_label = property(
+ fget=lambda self: self._dir_label,
+ fset=lambda self, v: setattr(self, "_dir_label", bool(v)),
+ doc="Whether to include entity label in dir() listing.",
+ )
+ dir_name = property(
+ fget=lambda self: self._dir_name,
+ fset=lambda self, v: setattr(self, "_dir_name", bool(v)),
+ doc="Whether to include entity name in dir() listing.",
+ )
+ dir_imported = property(
+ fget=lambda self: self._dir_imported,
+ fset=lambda self, v: setattr(self, "_dir_imported", bool(v)),
+ doc="Whether to include imported ontologies in dir() listing.",
+ )
+
+ # Other settings
+ _colon_in_label = False
+ colon_in_label = property(
+ fget=lambda self: self._colon_in_label,
+ fset=lambda self, v: setattr(self, "_colon_in_label", bool(v)),
+ doc="Whether to accept colon in name-part of IRI. "
+ "If true, the name cannot be prefixed.",
+ )
+
+ def __dir__(self):
+ dirset = set(super().__dir__())
+ lst = list(self.get_entities(imported=self._dir_imported))
+ if self._dir_preflabel:
+ dirset.update(
+ str(dir.prefLabel.first())
+ for dir in lst
+ if hasattr(dir, "prefLabel")
+ )
+ if self._dir_label:
+ dirset.update(
+ str(dir.label.first()) for dir in lst if hasattr(dir, "label")
+ )
+ if self._dir_name:
+ dirset.update(dir.name for dir in lst if hasattr(dir, "name"))
+ dirset.difference_update({None}) # get rid of possible None
+ return sorted(dirset)
+
+ def __getitem__(self, name):
+ item = super().__getitem__(name)
+ if not item:
+ item = self.get_by_label(name)
+ return item
+
+ def __getattr__(self, name):
+ attr = super().__getattr__(name)
+ if not attr:
+ attr = self.get_by_label(name)
+ return attr
+
+ def __contains__(self, other):
+ if self.world[other]:
+ return True
+ try:
+ self.get_by_label(other)
+ except NoSuchLabelError:
+ return False
+ return True
+
+ def __objclass__(self):
+ # Play nice with inspect...
+ pass
+
+ def __hash__(self):
+ """Returns a hash based on base_iri.
+ This is done to keep Ontology hashable when defining __eq__.
+ """
+ return hash(self.base_iri)
+
+ def __eq__(self, other):
+ """Checks if this ontology is equal to `other`.
+
+ This function compares the result of
+ ``set(self.get_unabbreviated_triples(label='_:b'))``,
+ i.e. blank nodes are not distinguished, but relations to blank
+ nodes are included.
+ """
+ return set(self.get_unabbreviated_triples(blank="_:b")) == set(
+ other.get_unabbreviated_triples(blank="_:b")
+ )
+
+ def get_unabbreviated_triples(
+ self, subject=None, predicate=None, obj=None, blank=None
+ ):
+ """Returns all matching triples unabbreviated.
+
+ If `blank` is given, it will be used to represent blank nodes.
+ """
+ # pylint: disable=invalid-name
+ return _get_unabbreviated_triples(
+ self, subject=subject, predicate=predicate, obj=obj, blank=blank
+ )
+
+ def set_default_label_annotations(self):
+ """Sets the default label annotations."""
+ warnings.warn(
+ "Ontology.set_default_label_annotations() is deprecated. "
+ "Default label annotations are set by Ontology.__init__(). ",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ self.label_annotations = DEFAULT_LABEL_ANNOTATIONS[:]
+
+ def get_by_label(
+ self,
+ label: str,
+ label_annotations: str = None,
+ prefix: str = None,
+ imported: bool = True,
+ colon_in_label: bool = None,
+ ):
+ """Returns entity with label annotation `label`.
+
+ Arguments:
+            label: label to search for.
+ May be written as 'label' or 'prefix:label'.
+ get_by_label('prefix:label') ==
+ get_by_label('label', prefix='prefix').
+ label_annotations: a sequence of label annotation names to look up.
+ Defaults to the `label_annotations` property.
+ prefix: if provided, it should be the last component of
+ the base iri of an ontology (with trailing slash (/) or hash
+ (#) stripped off). The search for a matching label will be
+ limited to this namespace.
+ imported: Whether to also look for `label` in imported ontologies.
+ colon_in_label: Whether to accept colon (:) in a label or name-part
+ of IRI. Defaults to the `colon_in_label` property of `self`.
+ Setting this true cannot be combined with `prefix`.
+
+ If several entities have the same label, only the one which is
+        found first is returned. Use get_by_label_all() to get all matches.
+
+ Note, if different prefixes are provided in the label and via
+ the `prefix` argument a warning will be issued and the
+ `prefix` argument will take precedence.
+
+ A NoSuchLabelError is raised if `label` cannot be found.
+ """
+ # pylint: disable=too-many-arguments,too-many-branches,invalid-name
+ if not isinstance(label, str):
+ raise TypeError(
+ f"Invalid label definition, must be a string: '{label}'"
+ )
+
+ if label_annotations is None:
+ label_annotations = self.label_annotations
+
+ if colon_in_label is None:
+ colon_in_label = self._colon_in_label
+ if colon_in_label:
+ if prefix:
+ raise ValueError(
+ "`prefix` cannot be combined with `colon_in_label`"
+ )
+ else:
+ splitlabel = label.split(":", 1)
+ if len(splitlabel) == 2 and not splitlabel[1].startswith("//"):
+ label = splitlabel[1]
+ if prefix and prefix != splitlabel[0]:
+ warnings.warn(
+ f"Prefix given both as argument ({prefix}) "
+ f"and in label ({splitlabel[0]}). "
+ "Prefix given in argument takes precedence. "
+ )
+ if not prefix:
+ prefix = splitlabel[0]
+
+ if prefix:
+ entityset = self.get_by_label_all(
+ label,
+ label_annotations=label_annotations,
+ prefix=prefix,
+ )
+ if len(entityset) == 1:
+ return entityset.pop()
+ if len(entityset) > 1:
+ raise AmbiguousLabelError(
+ f"Several entities have the same label '{label}' "
+ f"with prefix '{prefix}'."
+ )
+ raise NoSuchLabelError(
+ f"No label annotations matches for '{label}' "
+ f"with prefix '{prefix}'."
+ )
+
+ # Label is a full IRI
+ entity = self.world[label]
+ if entity:
+ return entity
+
+ get_triples = (
+ self.world._get_data_triples_spod_spod
+ if imported
+ else self._get_data_triples_spod_spod
+ )
+
+ for storid in self._to_storids(label_annotations):
+ for s, _, _, _ in get_triples(None, storid, label, None):
+ return self.world[self._unabbreviate(s)]
+
+ # Special labels
+ if self._special_labels and label in self._special_labels:
+ return self._special_labels[label]
+
+ # Check if label is a name under base_iri
+ entity = self.world[self.base_iri + label]
+ if entity:
+ return entity
+
+ # Check label is the name of an entity
+ for entity in self.get_entities(imported=imported):
+ if label == entity.name:
+ return entity
+
+ raise NoSuchLabelError(f"No label annotations matches '{label}'")
+
+ def get_by_label_all(
+ self,
+ label,
+ label_annotations=None,
+ prefix=None,
+ exact_match=False,
+ ) -> "Set[Optional[owlready2.entity.EntityClass]]":
+ """Returns set of entities with label annotation `label`.
+
+ Arguments:
+            label: label to search for.
+ May be written as 'label' or 'prefix:label'. Wildcard matching
+ using glob pattern is also supported if `exact_match` is set to
+ false.
+ label_annotations: a sequence of label annotation names to look up.
+ Defaults to the `label_annotations` property.
+ prefix: if provided, it should be the last component of
+ the base iri of an ontology (with trailing slash (/) or hash
+ (#) stripped off). The search for a matching label will be
+ limited to this namespace.
+ exact_match: Do not treat "*" and brackets as special characters
+ when matching. May be useful if your ontology has labels
+ containing such labels.
+
+ Returns:
+ Set of all matching entities or an empty set if no matches
+ could be found.
+ """
+ if not isinstance(label, str):
+ raise TypeError(
+ f"Invalid label definition, " f"must be a string: {label!r}"
+ )
+ if " " in label:
+ raise ValueError(
+ f"Invalid label definition, {label!r} contains spaces."
+ )
+
+ if label_annotations is None:
+ label_annotations = self.label_annotations
+
+ entities = set()
+
+ # Check label annotations
+ if exact_match:
+ for storid in self._to_storids(label_annotations):
+ entities.update(
+ self.world._get_by_storid(s)
+ for s, _, _ in self.world._get_data_triples_spod_spod(
+ None, storid, str(label), None
+ )
+ )
+ else:
+ for storid in self._to_storids(label_annotations):
+ label_entity = self._unabbreviate(storid)
+ key = (
+ label_entity.name
+ if hasattr(label_entity, "name")
+ else label_entity
+ )
+ entities.update(self.world.search(**{key: label}))
+
+ if self._special_labels and label in self._special_labels:
+ entities.update(self._special_labels[label])
+
+ # Check name-part of IRI
+ if exact_match:
+ entities.update(
+ ent for ent in self.get_entities() if ent.name == str(label)
+ )
+ else:
+ matches = fnmatch.filter(
+ (ent.name for ent in self.get_entities()), label
+ )
+ entities.update(
+ ent for ent in self.get_entities() if ent.name in matches
+ )
+
+ if prefix:
+ return set(
+ ent
+ for ent in entities
+ if ent.namespace.ontology.prefix == prefix
+ )
+ return entities
+
+ def _to_storids(self, sequence, create_if_missing=False):
+ """Return a list of storid's corresponding to the elements in the
+ sequence `sequence`.
+
+ The elements may be either be full IRIs (strings) or Owlready2
+ entities with an associated storid.
+
+ If `create_if_missing` is true, new Owlready2 entities will be
+ created for IRIs that not already are associated with an
+ entity. Otherwise such IRIs will be skipped in the returned
+ list.
+ """
+ if not sequence:
+ return []
+ storids = []
+ for element in sequence:
+ if hasattr(element, "storid"):
+ storids.append(element.storid)
+ else:
+ storid = self.world._abbreviate(element, create_if_missing)
+ if storid:
+ storids.append(storid)
+ return storids
+
+ def add_label_annotation(self, iri):
+ """Adds label annotation used by get_by_label()."""
+ warnings.warn(
+ "Ontology.add_label_annotations() is deprecated. "
+            "Directly modify the `label_annotations` attribute instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ if hasattr(iri, "iri"):
+ iri = iri.iri
+ if iri not in self.label_annotations:
+ self.label_annotations.append(iri)
+
+ def remove_label_annotation(self, iri):
+ """Removes label annotation used by get_by_label()."""
+ warnings.warn(
+ "Ontology.remove_label_annotations() is deprecated. "
+            "Directly modify the `label_annotations` attribute instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ if hasattr(iri, "iri"):
+ iri = iri.iri
+ try:
+ self.label_annotations.remove(iri)
+ except ValueError:
+ pass
+
+ def set_common_prefix(
+ self,
+ iri_base: str = "http://emmo.info/emmo",
+ prefix: str = "emmo",
+ visited: "Optional[Set]" = None,
+ ) -> None:
+ """Set a common prefix for all imported ontologies
+ with the same first part of the base_iri.
+
+ Args:
+ iri_base: The start of the base_iri to look for. Defaults to
+ the emmo base_iri http://emmo.info/emmo
+ prefix: the desired prefix. Defaults to emmo.
+ visited: Ontologies to skip. Only intended for internal use.
+ """
+ if visited is None:
+ visited = set()
+ if self.base_iri.startswith(iri_base):
+ self.prefix = prefix
+ for onto in self.imported_ontologies:
+ if not onto in visited:
+ visited.add(onto)
+ onto.set_common_prefix(
+ iri_base=iri_base, prefix=prefix, visited=visited
+ )
+
+ def load( # pylint: disable=too-many-arguments,arguments-renamed
+ self,
+ only_local=False,
+ filename=None,
+ format=None, # pylint: disable=redefined-builtin
+ reload=None,
+ reload_if_newer=False,
+ url_from_catalog=None,
+ catalog_file="catalog-v001.xml",
+ emmo_based=True,
+ prefix=None,
+ prefix_emmo=None,
+ **kwargs,
+ ):
+ """Load the ontology.
+
+ Arguments
+ ---------
+ only_local: bool
+ Whether to only read local files. This requires that you
+ have appended the path to the ontology to owlready2.onto_path.
+ filename: str
+ Path to file to load the ontology from. Defaults to `base_iri`
+ provided to get_ontology().
+ format: str
+ Format of `filename`. Default is inferred from `filename`
+ extension.
+ reload: bool
+ Whether to reload the ontology if it is already loaded.
+ reload_if_newer: bool
+ Whether to reload the ontology if the source has changed since
+ last time it was loaded.
+ url_from_catalog: bool | None
+ Whether to use catalog file to resolve the location of `base_iri`.
+ If None, the catalog file is used if it exists in the same
+ directory as `filename`.
+ catalog_file: str
+            Name of Protégé catalog file in the same folder as the
+ ontology. This option is used together with `only_local` and
+ defaults to "catalog-v001.xml".
+ emmo_based: bool
+ Whether this is an EMMO-based ontology or not, default `True`.
+ prefix: defaults to self.get_namespace.name if
+ prefix_emmo: bool, default None. If emmo_based is True it
+ defaults to True and sets the prefix of all imported ontologies
+ with base_iri starting with 'http://emmo.info/emmo' to emmo
+ kwargs:
+ Additional keyword arguments are passed on to
+ owlready2.Ontology.load().
+ """
+ # TODO: make sure that `only_local` argument is respected...
+
+ if self.loaded:
+ return self
+ self._load(
+ only_local=only_local,
+ filename=filename,
+ format=format,
+ reload=reload,
+ reload_if_newer=reload_if_newer,
+ url_from_catalog=url_from_catalog,
+ catalog_file=catalog_file,
+ **kwargs,
+ )
+
+ # Enable optimised search by get_by_label()
+ if self._special_labels is None and emmo_based:
+ top = self.world["http://www.w3.org/2002/07/owl#topObjectProperty"]
+ self._special_labels = {
+ "Thing": owlready2.Thing,
+ "Nothing": owlready2.Nothing,
+ "topObjectProperty": top,
+ "owl:Thing": owlready2.Thing,
+ "owl:Nothing": owlready2.Nothing,
+ "owl:topObjectProperty": top,
+ }
+ # set prefix if another prefix is desired
+ # if we do this, shouldn't we make the name of all
+ # entities of the given ontology to the same?
+ if prefix:
+ self.prefix = prefix
+ else:
+ self.prefix = self.name
+
+ if emmo_based and prefix_emmo is None:
+ prefix_emmo = True
+ if prefix_emmo:
+ self.set_common_prefix()
+
+ return self
+
    def _load(  # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements
        self,
        only_local=False,
        filename=None,
        format=None,  # pylint: disable=redefined-builtin
        reload=None,
        reload_if_newer=False,
        url_from_catalog=None,
        catalog_file="catalog-v001.xml",
        **kwargs,
    ):
        """Help function for load().

        Resolves the ontology source (local file, URL or catalog entry),
        converts formats owlready2 cannot read to rdfxml via rdflib, and
        finally delegates to owlready2.Ontology.load().
        """
        web_protocol = "http://", "https://", "ftp://"
        # Determine the URL to load from and where a catalog file next
        # to it would be located.
        url = str(filename) if filename else self.base_iri.rstrip("/#")
        if url.startswith(web_protocol):
            baseurl = os.path.dirname(url)
            catalogurl = baseurl + "/" + catalog_file
        else:
            if url.startswith("file://"):
                url = url[7:]
            url = os.path.normpath(os.path.abspath(url))
            baseurl = os.path.dirname(url)
            catalogurl = os.path.join(baseurl, catalog_file)

        def getmtime(path):
            # Modification time of `path`, or 0.0 if it does not exist.
            if os.path.exists(path):
                return os.path.getmtime(path)
            return 0.0

        # Resolve url from catalog file
        iris = {}
        dirs = set()
        if url_from_catalog or url_from_catalog is None:
            # NOTE(review): if `reload_if_newer` is true while `catalogurl`
            # is not yet in `_cached_catalogs`, the subscription below
            # raises KeyError -- confirm whether that combination can occur.
            not_reload = not reload and (
                not reload_if_newer
                or getmtime(catalogurl)
                > self.world._cached_catalogs[catalogurl][0]
            )
            # get iris from catalog already in cached catalogs
            if catalogurl in self.world._cached_catalogs and not_reload:
                _, iris, dirs = self.world._cached_catalogs[catalogurl]
            # do not update cached_catalogs if url already in _iri_mappings
            # and reload not forced
            elif url in self.world._iri_mappings and not_reload:
                pass
            # update iris from current catalogurl
            else:
                try:
                    iris, dirs = read_catalog(
                        uri=catalogurl,
                        recursive=False,
                        return_paths=True,
                        catalog_file=catalog_file,
                    )
                except ReadCatalogError:
                    # Only tolerate a missing/broken catalog when use of a
                    # catalog was not explicitly requested.
                    if url_from_catalog is not None:
                        raise
                    self.world._cached_catalogs[catalogurl] = (0.0, {}, set())
                else:
                    self.world._cached_catalogs[catalogurl] = (
                        getmtime(catalogurl),
                        iris,
                        dirs,
                    )
            self.world._iri_mappings.update(iris)
        resolved_url = self.world._iri_mappings.get(url, url)
        # Append paths from catalog file to onto_path
        for path in sorted(dirs, reverse=True):
            if path not in owlready2.onto_path:
                owlready2.onto_path.append(path)

        # Use catalog file to update IRIs of imported ontologies
        # in internal store and try to load again...
        if self.world._iri_mappings:
            for abbrev_iri in self.world._get_obj_triples_sp_o(
                self.storid, owlready2.owl_imports
            ):
                iri = self._unabbreviate(abbrev_iri)
                if iri in self.world._iri_mappings:
                    self._del_obj_triple_spo(
                        self.storid, owlready2.owl_imports, abbrev_iri
                    )
                    self._add_obj_triple_spo(
                        self.storid,
                        owlready2.owl_imports,
                        self._abbreviate(self.world._iri_mappings[iri]),
                    )

        # Load ontology
        try:
            self.loaded = False
            fmt = format if format else guess_format(resolved_url, fmap=FMAP)
            if fmt and fmt not in OWLREADY2_FORMATS:
                # Convert filename to rdfxml before passing it to owlready2
                graph = rdflib.Graph()
                try:
                    graph.parse(resolved_url, format=fmt)
                except URLError as err:
                    raise EMMOntoPyException(
                        "URL error", err, resolved_url
                    ) from err

                with tempfile.NamedTemporaryFile() as handle:
                    graph.serialize(destination=handle, format="xml")
                    handle.seek(0)
                    return super().load(
                        only_local=True,
                        fileobj=handle,
                        reload=reload,
                        reload_if_newer=reload_if_newer,
                        format="rdfxml",
                        **kwargs,
                    )
            elif resolved_url.startswith(web_protocol):
                return super().load(
                    only_local=only_local,
                    reload=reload,
                    reload_if_newer=reload_if_newer,
                    **kwargs,
                )

            else:
                with open(resolved_url, "rb") as handle:
                    return super().load(
                        only_local=only_local,
                        fileobj=handle,
                        reload=reload,
                        reload_if_newer=reload_if_newer,
                        **kwargs,
                    )
        except owlready2.OwlReadyOntologyParsingError:
            # Owlready2 is not able to parse the ontology - most
            # likely because imported ontologies must be resolved
            # using the catalog file.

            # Reraise if we don't want to read from the catalog file
            if not url_from_catalog and url_from_catalog is not None:
                raise

            warnings.warn(
                "Recovering from Owlready2 parsing error... might be deprecated"
            )

            # Copy the ontology into a local folder and try again
            with tempfile.TemporaryDirectory() as handle:
                output = os.path.join(handle, os.path.basename(resolved_url))
                convert_imported(
                    input_ontology=resolved_url,
                    output_ontology=output,
                    input_format=fmt,
                    output_format="xml",
                    url_from_catalog=url_from_catalog,
                    catalog_file=catalog_file,
                )

                self.loaded = False
                with open(output, "rb") as handle:
                    try:
                        return super().load(
                            only_local=True,
                            fileobj=handle,
                            reload=reload,
                            reload_if_newer=reload_if_newer,
                            format="rdfxml",
                            **kwargs,
                        )
                    except HTTPError as exc:  # Add url to HTTPError message
                        raise HTTPError(
                            url=exc.url,
                            code=exc.code,
                            msg=f"{exc.url}: {exc.msg}",
                            hdrs=exc.hdrs,
                            fp=exc.fp,
                        ).with_traceback(exc.__traceback__)

        except HTTPError as exc:  # Add url to HTTPError message
            raise HTTPError(
                url=exc.url,
                code=exc.code,
                msg=f"{exc.url}: {exc.msg}",
                hdrs=exc.hdrs,
                fp=exc.fp,
            ).with_traceback(exc.__traceback__)
+
    def save(
        self,
        filename=None,
        format=None,
        dir=".",
        mkdir=False,
        overwrite=False,
        recursive=False,
        squash=False,
        write_catalog_file=False,
        append_catalog=False,
        catalog_file="catalog-v001.xml",
    ):
        """Writes the ontology to file.

        Parameters
        ----------
        filename: None | str | Path
            Name of file to write to. If None, it defaults to the name
            of the ontology with `format` as file extension.
        format: str
            Output format. The default is to infer it from `filename`.
        dir: str | Path
            If `filename` is a relative path, it is a relative path to `dir`.
        mkdir: bool
            Whether to create output directory if it does not exists.
        overwrite: bool
            If true and `filename` exists, remove the existing file before
            saving. The default is to append to an existing ontology.
        recursive: bool
            Whether to save imported ontologies recursively. This is
            commonly combined with `filename=None`, `dir` and `mkdir`.
        squash: bool
            If true, rdflib will be used to save the current ontology
            together with all its sub-ontologies into `filename`.
            It make no sense to combine this with `recursive`.
        write_catalog_file: bool
            Whether to also write a catalog file to disk.
        append_catalog: bool
            Whether to append to an existing catalog file.
        catalog_file: str | Path
            Name of catalog file. If not an absolute path, it is prepended
            to `dir`.
        """
        # pylint: disable=redefined-builtin,too-many-arguments
        # pylint: disable=too-many-statements,too-many-branches
        # pylint: disable=too-many-locals,arguments-renamed
        # Converting to Turtle with rdflib < 6.0.0 is known to be broken;
        # warn instead of failing.
        if not _validate_installed_version(
            package="rdflib", min_version="6.0.0"
        ) and format == FMAP.get("ttl", ""):
            from rdflib import (  # pylint: disable=import-outside-toplevel
                __version__ as __rdflib_version__,
            )

            warnings.warn(
                IncompatibleVersion(
                    "To correctly convert to Turtle format, rdflib must be "
                    "version 6.0.0 or greater, however, the detected rdflib "
                    "version used by your Python interpreter is "
                    f"{__rdflib_version__!r}. For more information see the "
                    "'Known issues' section of the README."
                )
            )

        # Map from format name back to file extension.
        revmap = {value: key for key, value in FMAP.items()}
        if filename is None:
            if format:
                fmt = revmap.get(format, format)
                filename = f"{self.name}.{fmt}"
            else:
                raise TypeError("`filename` and `format` cannot both be None.")
        filename = os.path.join(dir, filename)
        dir = Path(filename).resolve().parent

        if mkdir:
            outdir = Path(filename).parent.resolve()
            if not outdir.exists():
                outdir.mkdir(parents=True)

        if not format:
            format = guess_format(filename, fmap=FMAP)
        fmt = revmap.get(format, format)

        if overwrite and filename and os.path.exists(filename):
            os.remove(filename)

        EMMO = rdflib.Namespace(  # pylint:disable=invalid-name
            "http://emmo.info/emmo#"
        )

        if recursive:
            if squash:
                raise ValueError(
                    "`recursive` and `squash` should not both be true"
                )
            # Save each ontology in the import closure to its own file,
            # in a directory layout mirroring the ontology IRIs.
            layout = directory_layout(self)

            for onto, path in layout.items():
                fname = Path(dir) / f"{path}.{fmt}"
                onto.save(
                    filename=fname,
                    format=format,
                    dir=dir,
                    mkdir=mkdir,
                    overwrite=overwrite,
                    recursive=False,
                    squash=False,
                    write_catalog_file=False,
                )

            if write_catalog_file:
                catalog_files = set()
                irimap = {}
                for onto, path in layout.items():
                    irimap[
                        onto.get_version(as_iri=True)
                    ] = f"{dir}/{path}.{fmt}"
                    catalog_files.add(Path(path).parent / catalog_file)

                for catfile in catalog_files:
                    write_catalog(
                        irimap.copy(),
                        output=catfile,
                        directory=dir,
                        append=append_catalog,
                    )

        elif write_catalog_file:
            write_catalog(
                {self.get_version(as_iri=True): filename},
                output=catalog_file,
                directory=dir,
                append=append_catalog,
            )

        if squash:
            from rdflib import (  # pylint:disable=import-outside-toplevel
                URIRef,
                RDF,
                OWL,
            )

            graph = self.world.as_rdflib_graph()
            graph.namespace_manager.bind("emmo", EMMO)

            # Remove anonymous namespace and imports
            graph.remove((URIRef("http://anonymous"), RDF.type, OWL.Ontology))
            imports = list(graph.triples((None, OWL.imports, None)))
            for triple in imports:
                graph.remove(triple)

            graph.serialize(destination=filename, format=format)
        elif format in OWLREADY2_FORMATS:
            super().save(file=filename, format=fmt)
        else:
            # The try-finally clause is needed for cleanup and because
            # we have to provide delete=False to NamedTemporaryFile
            # since Windows does not allow to reopen an already open
            # file.
            # NOTE(review): if NamedTemporaryFile() itself fails, `tmpfile`
            # is unbound and the finally clause raises NameError -- confirm
            # whether this needs guarding.
            try:
                with tempfile.NamedTemporaryFile(
                    suffix=".owl", delete=False
                ) as handle:
                    tmpfile = handle.name
                super().save(tmpfile, format="ntriples")
                graph = rdflib.Graph()
                graph.parse(tmpfile, format="ntriples")
                graph.serialize(destination=filename, format=format)
            finally:
                os.remove(tmpfile)
+
+ def get_imported_ontologies(self, recursive=False):
+ """Return a list with imported ontologies.
+
+ If `recursive` is `True`, ontologies imported by imported ontologies
+ are also returned.
+ """
+
+ def rec_imported(onto):
+ for ontology in onto.imported_ontologies:
+ if ontology not in imported:
+ imported.add(ontology)
+ rec_imported(ontology)
+
+ if recursive:
+ imported = set()
+ rec_imported(self)
+ return list(imported)
+
+ return self.imported_ontologies
+
+ def get_entities( # pylint: disable=too-many-arguments
+ self,
+ imported=True,
+ classes=True,
+ individuals=True,
+ object_properties=True,
+ data_properties=True,
+ annotation_properties=True,
+ ):
+ """Return a generator over (optionally) all classes, individuals,
+ object_properties, data_properties and annotation_properties.
+
+ If `imported` is `True`, entities in imported ontologies will also
+ be included.
+ """
+ generator = []
+ if classes:
+ generator.append(self.classes(imported))
+ if individuals:
+ generator.append(self.individuals(imported))
+ if object_properties:
+ generator.append(self.object_properties(imported))
+ if data_properties:
+ generator.append(self.data_properties(imported))
+ if annotation_properties:
+ generator.append(self.annotation_properties(imported))
+ for entity in itertools.chain(*generator):
+ yield entity
+
+ def classes(self, imported=False):
+ """Returns an generator over all classes.
+
+ Arguments:
+ imported: if `True`, entities in imported ontologies
+ are also returned.
+ """
+ return self._entities("classes", imported=imported)
+
+ def _entities(
+ self, entity_type, imported=False
+ ): # pylint: disable=too-many-branches
+ """Returns an generator over all entities of the desired type.
+ This is a helper function for `classes()`, `individuals()`,
+ `object_properties()`, `data_properties()` and
+ `annotation_properties()`.
+
+ Arguments:
+ entity_type: The type of entity desired given as a string.
+ Can be any of `classes`, `individuals`,
+ `object_properties`, `data_properties` and
+ `annotation_properties`.
+ imported: if `True`, entities in imported ontologies
+ are also returned.
+ """
+
+ generator = []
+ if imported:
+ ontologies = self.get_imported_ontologies(recursive=True)
+ ontologies.append(self)
+ for onto in ontologies:
+ if entity_type == "classes":
+ for cls in list(onto.classes()):
+ generator.append(cls)
+ elif entity_type == "individuals":
+ for ind in list(onto.individuals()):
+ generator.append(ind)
+ elif entity_type == "object_properties":
+ for prop in list(onto.object_properties()):
+ generator.append(prop)
+ elif entity_type == "data_properties":
+ for prop in list(onto.data_properties()):
+ generator.append(prop)
+ elif entity_type == "annotation_properties":
+ for prop in list(onto.annotation_properties()):
+ generator.append(prop)
+ else:
+ if entity_type == "classes":
+ generator = super().classes()
+ elif entity_type == "individuals":
+ generator = super().individuals()
+ elif entity_type == "object_properties":
+ generator = super().object_properties()
+ elif entity_type == "data_properties":
+ generator = super().data_properties()
+ elif entity_type == "annotation_properties":
+ generator = super().annotation_properties()
+
+ for entity in generator:
+ yield entity
+
+ def individuals(self, imported=False):
+ """Returns an generator over all individuals.
+
+ Arguments:
+ imported: if `True`, entities in imported ontologies
+ are also returned.
+ """
+ return self._entities("individuals", imported=imported)
+
+ def object_properties(self, imported=False):
+ """Returns an generator over all object_properties.
+
+ Arguments:
+ imported: if `True`, entities in imported ontologies
+ are also returned.
+ """
+ return self._entities("object_properties", imported=imported)
+
+ def data_properties(self, imported=False):
+ """Returns an generator over all data_properties.
+
+ Arguments:
+ imported: if `True`, entities in imported ontologies
+ are also returned.
+ """
+ return self._entities("data_properties", imported=imported)
+
+ def annotation_properties(self, imported=False):
+ """Returns an generator over all annotation_properties.
+
+ Arguments:
+ imported: if `True`, entities in imported ontologies
+ are also returned.
+
+ """
+ return self._entities("annotation_properties", imported=imported)
+
+ def get_root_classes(self, imported=False):
+ """Returns a list or root classes."""
+ return [
+ cls
+ for cls in self.classes(imported=imported)
+ if not cls.ancestors().difference(set([cls, owlready2.Thing]))
+ ]
+
+ def get_root_object_properties(self, imported=False):
+ """Returns a list of root object properties."""
+ props = set(self.object_properties(imported=imported))
+ return [p for p in props if not props.intersection(p.is_a)]
+
+ def get_root_data_properties(self, imported=False):
+ """Returns a list of root object properties."""
+ props = set(self.data_properties(imported=imported))
+ return [p for p in props if not props.intersection(p.is_a)]
+
+ def get_roots(self, imported=False):
+ """Returns all class, object_property and data_property roots."""
+ roots = self.get_root_classes(imported=imported)
+ roots.extend(self.get_root_object_properties(imported=imported))
+ roots.extend(self.get_root_data_properties(imported=imported))
+ return roots
+
+ def sync_python_names(self, annotations=("prefLabel", "label", "altLabel")):
+ """Update the `python_name` attribute of all properties.
+
+ The python_name attribute will be set to the first non-empty
+ annotation in the sequence of annotations in `annotations` for
+ the property.
+ """
+
+ def update(gen):
+ for prop in gen:
+ for annotation in annotations:
+ if hasattr(prop, annotation) and getattr(prop, annotation):
+ prop.python_name = getattr(prop, annotation).first()
+ break
+
+ update(
+ self.get_entities(
+ classes=False,
+ individuals=False,
+ object_properties=False,
+ data_properties=False,
+ )
+ )
+ update(
+ self.get_entities(
+ classes=False, individuals=False, annotation_properties=False
+ )
+ )
+
+ def rename_entities(
+ self,
+ annotations=("prefLabel", "label", "altLabel"),
+ ):
+ """Set `name` of all entities to the first non-empty annotation in
+ `annotations`.
+
+ Warning, this method changes all IRIs in the ontology. However,
+ it may be useful to make the ontology more readable and to work
+ with it together with a triple store.
+ """
+ for entity in self.get_entities():
+ for annotation in annotations:
+ if hasattr(entity, annotation):
+ name = getattr(entity, annotation).first()
+ if name:
+ entity.name = name
+ break
+
+ def sync_reasoner(
+ self, reasoner="HermiT", include_imported=False, **kwargs
+ ):
+ """Update current ontology by running the given reasoner.
+
+ Supported values for `reasoner` are 'HermiT' (default), Pellet
+ and 'FaCT++'.
+
+ If `include_imported` is true, the reasoner will also reason
+ over imported ontologies. Note that this may be **very** slow.
+
+ Keyword arguments are passed to the underlying owlready2 function.
+ """
+ if reasoner == "FaCT++":
+ sync = sync_reasoner_factpp
+ elif reasoner == "Pellet":
+ sync = owlready2.sync_reasoner_pellet
+ elif reasoner == "HermiT":
+ sync = owlready2.sync_reasoner_hermit
+ else:
+ raise ValueError(
+ f"Unknown reasoner '{reasoner}'. Supported reasoners "
+ "are 'Pellet', 'HermiT' and 'FaCT++'."
+ )
+
+ # For some reason we must visit all entities once before running
+ # the reasoner...
+ list(self.get_entities())
+
+ with self:
+ if include_imported:
+ sync(self.world, **kwargs)
+ else:
+ sync(self, **kwargs)
+
+ def sync_attributes( # pylint: disable=too-many-branches
+ self,
+ name_policy=None,
+ name_prefix="",
+ class_docstring="comment",
+ sync_imported=False,
+ ):
+ """This method is intended to be called after you have added new
+ classes (typically via Python) to make sure that attributes like
+ `label` and `comments` are defined.
+
+ If a class, object property, data property or annotation
+ property in the current ontology has no label, the name of
+ the corresponding Python class will be assigned as label.
+
+ If a class, object property, data property or annotation
+ property has no comment, it will be assigned the docstring of
+ the corresponding Python class.
+
+ `name_policy` specify wether and how the names in the ontology
+ should be updated. Valid values are:
+ None not changed
+ "uuid" `name_prefix` followed by a global unique id (UUID).
+ If the name is already valid accoridng to this standard
+ it will not be regenerated.
+ "sequential" `name_prefix` followed a sequantial number.
+ EMMO conventions imply ``name_policy=='uuid'``.
+
+ If `sync_imported` is true, all imported ontologies are also
+ updated.
+
+ The `class_docstring` argument specifies the annotation that
+ class docstrings are mapped to. Defaults to "comment".
+ """
+ for cls in itertools.chain(
+ self.classes(),
+ self.object_properties(),
+ self.data_properties(),
+ self.annotation_properties(),
+ ):
+ if not hasattr(cls, "prefLabel"):
+ # no prefLabel - create new annotation property..
+ with self:
+ # pylint: disable=invalid-name,missing-class-docstring
+ # pylint: disable=unused-variable
+ class prefLabel(owlready2.label):
+ pass
+
+ cls.prefLabel = [locstr(cls.__name__, lang="en")]
+ elif not cls.prefLabel:
+ cls.prefLabel.append(locstr(cls.__name__, lang="en"))
+ if class_docstring and hasattr(cls, "__doc__") and cls.__doc__:
+ getattr(cls, class_docstring).append(
+ locstr(inspect.cleandoc(cls.__doc__), lang="en")
+ )
+
+ for ind in self.individuals():
+ if not hasattr(ind, "prefLabel"):
+ # no prefLabel - create new annotation property..
+ with self:
+ # pylint: disable=invalid-name,missing-class-docstring
+ # pylint: disable=function-redefined
+ class prefLabel(owlready2.label):
+ iri = "http://www.w3.org/2004/02/skos/core#prefLabel"
+
+ ind.prefLabel = [locstr(ind.name, lang="en")]
+ elif not ind.prefLabel:
+ ind.prefLabel.append(locstr(ind.name, lang="en"))
+
+ chain = itertools.chain(
+ self.classes(),
+ self.individuals(),
+ self.object_properties(),
+ self.data_properties(),
+ self.annotation_properties(),
+ )
+ if name_policy == "uuid":
+ for obj in chain:
+ try:
+ # Passing the following means that the name is valid
+ # and need not be regenerated.
+ if not obj.name.startswith(name_prefix):
+ raise ValueError
+ uuid.UUID(obj.name.lstrip(name_prefix), version=5)
+ except ValueError:
+ obj.name = name_prefix + str(
+ uuid.uuid5(uuid.NAMESPACE_DNS, obj.name)
+ )
+ elif name_policy == "sequential":
+ for obj in chain:
+ counter = 0
+ while f"{self.base_iri}{name_prefix}{counter}" in self:
+ counter += 1
+ obj.name = f"{name_prefix}{counter}"
+ elif name_policy is not None:
+ raise TypeError(f"invalid name_policy: {name_policy!r}")
+
+ if sync_imported:
+ for onto in self.imported_ontologies:
+ onto.sync_attributes()
+
    def get_relations(self):
        """Returns a generator for all relations.

        Deprecated alias for `object_properties()`.
        """
        warnings.warn(
            "Ontology.get_relations() is deprecated. Use "
            "onto.object_properties() instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.object_properties()
+
+ def get_annotations(self, entity):
+ """Returns a dict with annotations for `entity`. Entity may be given
+ either as a ThingClass object or as a label."""
+ warnings.warn(
+ "Ontology.get_annotations(entity) is deprecated. Use "
+ "entity.get_annotations() instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ if isinstance(entity, str):
+ entity = self.get_by_label(entity)
+ res = {"comment": getattr(entity, "comment", "")}
+ for annotation in self.annotation_properties():
+ res[annotation.label.first()] = [
+ obj.strip('"')
+ for _, _, obj in self.get_triples(
+ entity.storid, annotation.storid, None
+ )
+ ]
+ return res
+
    def get_branch(  # pylint: disable=too-many-arguments
        self,
        root,
        leaves=(),
        include_leaves=True,
        strict_leaves=False,
        exclude=None,
        sort=False,
    ):
        """Returns a set with all direct and indirect subclasses of `root`.
        Any subclass found in the sequence `leaves` will be included in
        the returned list, but its subclasses will not. The elements
        of `leaves` may be ThingClass objects or labels.

        Subclasses of any subclass found in the sequence `leaves` will
        be excluded from the returned list, where the elements of `leaves`
        may be ThingClass objects or labels.

        If `include_leaves` is true, the leaves are included in the returned
        list, otherwise they are not.

        If `strict_leaves` is true, any descendant of a leaf will be excluded
        in the returned set.

        If given, `exclude` may be a sequence of classes, including
        their subclasses, to exclude from the output.

        If `sort` is True, a list sorted according to depth and label
        will be returned instead of a set.
        """

        def _branch(root, leaves):
            # Recursively collect `root` and its strict descendants,
            # stopping the descent at any class listed in `leaves`.
            if root not in leaves:
                branch = {
                    root,
                }
                for cls in root.subclasses():
                    # Defining a branch is actually quite tricky. Consider
                    # the case:
                    #
                    #       L isA R
                    #       A isA L
                    #       A isA R
                    #
                    # where R is the root, L is a leaf and A is a direct
                    # child of both. Logically, since A is a child of the
                    # leaf we want to skip A. But a straightforward imple-
                    # mentation will see that A is a child of the root and
                    # include it. Requiring that the R should be a strict
                    # parent of A solves this.
                    if root in cls.get_parents(strict=True):
                        branch.update(_branch(cls, leaves))
            else:
                branch = (
                    {
                        root,
                    }
                    if include_leaves
                    else set()
                )
            return branch

        if isinstance(root, str):
            root = self.get_by_label(root)

        # Resolve labels to classes; the root itself is never a leaf.
        leaves = set(
            self.get_by_label(leaf) if isinstance(leaf, str) else leaf
            for leaf in leaves
        )
        leaves.discard(root)

        if exclude:
            exclude = set(
                self.get_by_label(e) if isinstance(e, str) else e
                for e in exclude
            )
            # Excluded classes act as leaves during the traversal and
            # are removed from the result afterwards.
            leaves.update(exclude)

        branch = _branch(root, leaves)

        # Exclude all descendants of any leaf
        if strict_leaves:
            descendants = root.descendants()
            for leaf in leaves:
                if leaf in descendants:
                    branch.difference_update(
                        leaf.descendants(include_self=False)
                    )

        if exclude:
            branch.difference_update(exclude)

        # Sort according to depth, then by label
        if sort:
            branch = sorted(
                sorted(branch, key=asstring),
                key=lambda x: len(x.mro()),
            )

        return branch
+
+ def is_individual(self, entity):
+ """Returns true if entity is an individual."""
+ if isinstance(entity, str):
+ entity = self.get_by_label(entity)
+ return isinstance(entity, owlready2.Thing)
+
+ # FIXME - deprecate this method as soon the ThingClass property
+ # `defined_class` works correct in Owlready2
+ def is_defined(self, entity):
+ """Returns true if the entity is a defined class.
+
+ Deprecated, use the `is_defined` property of the classes
+ (ThingClass subclasses) instead.
+ """
+ warnings.warn(
+ "This method is deprecated. Use the `is_defined` property of "
+ "the classes instad.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ if isinstance(entity, str):
+ entity = self.get_by_label(entity)
+ return hasattr(entity, "equivalent_to") and bool(entity.equivalent_to)
+
    def get_version(self, as_iri=False) -> str:
        """Returns the version number of the ontology as inferred from the
        owl:versionIRI tag or, if owl:versionIRI is not found, from
        owl:versionINFO.

        If `as_iri` is True, the full versionIRI is returned.

        Raises:
            TypeError: If `as_iri` is True but no owl:versionIRI triple
                exists, or if neither owl:versionIRI nor owl:versionInfo
                is found.
        """
        version_iri_storid = self.world._abbreviate(
            "http://www.w3.org/2002/07/owl#versionIRI"
        )
        tokens = self.get_triples(s=self.storid, p=version_iri_storid)
        if (not tokens) and (as_iri is True):
            raise TypeError(
                "No owl:versionIRI "
                f"in Ontology {self.base_iri!r}. "
                "Search for owl:versionInfo with as_iri=False"
            )
        if tokens:
            _, _, obj = tokens[0]
            version_iri = self.world._unabbreviate(obj)
            if as_iri:
                return version_iri
            # Derive the bare version number from base IRI + version IRI.
            return infer_version(self.base_iri, version_iri)

        # Fall back to owl:versionInfo.
        version_info_storid = self.world._abbreviate(
            "http://www.w3.org/2002/07/owl#versionInfo"
        )
        tokens = self.get_triples(s=self.storid, p=version_info_storid)
        if not tokens:
            raise TypeError(
                "No versionIRI or versionInfo " f"in Ontology {self.base_iri!r}"
            )
        # Strip any datatype suffix (e.g. ^^xsd:string) and surrounding
        # quotes from the literal.
        _, _, version_info = tokens[0]
        return version_info.split("^^")[0].strip('"')
+
    def set_version(self, version=None, version_iri=None):
        """Assign version to ontology by assigning owl:versionIRI.

        If `version` but not `version_iri` is provided, the version
        IRI will be the combination of `base_iri` and `version`.

        Raises:
            TypeError: If neither `version` nor `version_iri` is given.
        """
        _version_iri = "http://www.w3.org/2002/07/owl#versionIRI"
        version_iri_storid = self.world._abbreviate(_version_iri)
        # Remove any existing owl:versionIRI triple before adding the
        # new one.
        if self._has_obj_triple_spo(  # pylint: disable=unexpected-keyword-arg
            # For some reason _has_obj_triples_spo exists in both
            # owlready2.namespace.Namespace (with arguments subject/predicate)
            # and in owlready2.triplelite._GraphManager (with arguments s/p)
            # owlready2.Ontology inherits from Namespace directly
            # and pylint checks that.
            # It actually accesses the one in triplelite.
            # subject=self.storid, predicate=version_iri_storid
            s=self.storid,
            p=version_iri_storid,
        ):
            self._del_obj_triple_spo(s=self.storid, p=version_iri_storid)

        if not version_iri:
            if not version:
                raise TypeError(
                    "Either `version` or `version_iri` must be provided"
                )
            # Insert `version` between the base IRI directory and its
            # final path component.
            head, tail = self.base_iri.rstrip("#/").rsplit("/", 1)
            version_iri = "/".join([head, version, tail])

        self._add_obj_triple_spo(
            s=self.storid,
            p=self.world._abbreviate(_version_iri),
            o=self.world._abbreviate(version_iri),
        )
+
    def get_graph(self, **kwargs):
        """Returns a new graph object. See emmo.graph.OntoGraph.

        Note that this method requires the Python graphviz package.
        """
        # Deferred import to break the cyclic dependency between this
        # module and ontopy.graph.
        # pylint: disable=import-outside-toplevel,cyclic-import
        from ontopy.graph import OntoGraph

        return OntoGraph(self, **kwargs)
+
+ @staticmethod
+ def common_ancestors(cls1, cls2):
+ """Return a list of common ancestors for `cls1` and `cls2`."""
+ return set(cls1.ancestors()).intersection(cls2.ancestors())
+
+ def number_of_generations(self, descendant, ancestor):
+ """Return shortest distance from ancestor to descendant"""
+ if ancestor not in descendant.ancestors():
+ raise ValueError("Descendant is not a descendant of ancestor")
+ return self._number_of_generations(descendant, ancestor, 0)
+
+ def _number_of_generations(self, descendant, ancestor, counter):
+ """Recursive help function to number_of_generations(), return
+ distance between a ancestor-descendant pair (counter+1)."""
+ if descendant.name == ancestor.name:
+ return counter
+ try:
+ return min(
+ self._number_of_generations(parent, ancestor, counter + 1)
+ for parent in descendant.get_parents()
+ if ancestor in parent.ancestors()
+ )
+ except ValueError:
+ return counter
+
+ def closest_common_ancestors(self, cls1, cls2):
+ """Returns a list with closest_common_ancestor for cls1 and cls2"""
+ distances = {}
+ for ancestor in self.common_ancestors(cls1, cls2):
+ distances[ancestor] = self.number_of_generations(
+ cls1, ancestor
+ ) + self.number_of_generations(cls2, ancestor)
+ return [
+ ancestor
+ for ancestor, distance in distances.items()
+ if distance == min(distances.values())
+ ]
+
    @staticmethod
    def closest_common_ancestor(*classes):
        """Returns closest_common_ancestor for the given classes.

        Walks the MROs of all classes in lock-step; the first class
        counted in all of them is returned.

        Raises:
            EMMOntoPyException: If the classes share no common ancestor.
        """
        mros = [cls.mro() for cls in classes]
        track = defaultdict(int)
        while mros:
            for mro in mros:
                cur = mro.pop(0)
                track[cur] += 1
                if track[cur] == len(classes):
                    return cur
                if len(mro) == 0:
                    # NOTE(review): removing from `mros` while iterating it
                    # skips the next element for this round; it is revisited
                    # on the next `while` pass, so nothing is lost -- confirm
                    # before restructuring this loop.
                    mros.remove(mro)
        raise EMMOntoPyException(
            "A closest common ancestor should always exist !"
        )
+
    def get_ancestors(
        self,
        classes: "Union[List, ThingClass]",
        closest: bool = False,
        generations: int = None,
        strict: bool = True,
    ) -> set:
        """Return ancestors of all classes in `classes`.
        Args:
            classes: class(es) for which ancestors should be returned.
            generations: Include this number of generations, default is all.
            closest: If True, return all ancestors up to and including the
                closest common ancestor. Return all if False.
            strict: If True returns only real ancestors, i.e. `classes`
                are not included in the returned set.
        Returns:
            Set of ancestors to `classes`.
        Raises:
            ValueError: If both `closest` and `generations` are given.
        """
        if not isinstance(classes, Iterable):
            classes = [classes]

        ancestors = set()
        if not classes:
            return ancestors

        def addancestors(entity, counter, subject):
            # Recursively add up to `counter` generations of strict
            # parents of `entity` to the set `subject`.
            if counter > 0:
                for parent in entity.get_parents(strict=True):
                    subject.add(parent)
                    addancestors(parent, counter - 1, subject)

        if closest:
            if generations is not None:
                raise ValueError(
                    "Only one of `generations` or `closest` may be specified."
                )

            closest_ancestor = self.closest_common_ancestor(*classes)
            # Keep only ancestors that lie at or below the closest
            # common ancestor.
            for cls in classes:
                ancestors.update(
                    anc
                    for anc in cls.ancestors()
                    if closest_ancestor in anc.ancestors()
                )
        elif isinstance(generations, int):
            for entity in classes:
                addancestors(entity, generations, ancestors)
        else:
            ancestors.update(*(cls.ancestors() for cls in classes))

        if strict:
            return ancestors.difference(classes)
        return ancestors
+
+ def get_descendants(
+ self,
+ classes: "Union[List, ThingClass]",
+ generations: int = None,
+ common: bool = False,
+ ) -> set:
+ """Return descendants/subclasses of all classes in `classes`.
+ Args:
+ classes: class(es) for which descendants are desired.
+ common: whether to only return descendants common to all classes.
+ generations: Include this number of generations, default is all.
+ Returns:
+ A set of descendants for given number of generations.
+ If 'common'=True, the common descendants are returned
+ within the specified number of generations.
+ 'generations' defaults to all.
+ """
+
+ if not isinstance(classes, Iterable):
+ classes = [classes]
+
+ descendants = {name: [] for name in classes}
+
+ def _children_recursively(num, newentity, parent, descendants):
+ """Helper function to get all children up to generation."""
+ for child in self.get_children_of(newentity):
+ descendants[parent].append(child)
+ if num < generations:
+ _children_recursively(num + 1, child, parent, descendants)
+
+ if generations == 0:
+ return set()
+
+ if not generations:
+ for entity in classes:
+ descendants[entity] = entity.descendants()
+ # only include proper descendants
+ descendants[entity].remove(entity)
+ else:
+ for entity in classes:
+ _children_recursively(1, entity, entity, descendants)
+
+ results = descendants.values()
+ if common is True:
+ return set.intersection(*map(set, results))
+ return set(flatten(results))
+
+ def get_wu_palmer_measure(self, cls1, cls2):
+ """Return Wu-Palmer measure for semantic similarity.
+
+ Returns Wu-Palmer measure for semantic similarity between
+ two concepts.
+ Wu, Palmer; ACL 94: Proceedings of the 32nd annual meeting on
+ Association for Computational Linguistics, June 1994.
+ """
+ cca = self.closest_common_ancestor(cls1, cls2)
+ ccadepth = self.number_of_generations(cca, self.Thing)
+ generations1 = self.number_of_generations(cls1, cca)
+ generations2 = self.number_of_generations(cls2, cca)
+ return 2 * ccadepth / (generations1 + generations2 + 2 * ccadepth)
+
+ def new_entity(
+ self,
+ name: str,
+ parent: Union[
+ ThingClass,
+ ObjectPropertyClass,
+ DataPropertyClass,
+ AnnotationPropertyClass,
+ Iterable,
+ ],
+ entitytype: Optional[
+ Union[
+ str,
+ ThingClass,
+ ObjectPropertyClass,
+ DataPropertyClass,
+ AnnotationPropertyClass,
+ ]
+ ] = "class",
+ preflabel: Optional[str] = None,
+ ) -> Union[
+ ThingClass,
+ ObjectPropertyClass,
+ DataPropertyClass,
+ AnnotationPropertyClass,
+ ]:
+ """Create and return new entity
+
+ Args:
+ name: name of the entity
+ parent: parent(s) of the entity
+ entitytype: type of the entity,
+ default is 'class' (str) 'ThingClass' (owlready2 Python class).
+ Other options
+ are 'data_property', 'object_property',
+ 'annotation_property' (strings) or the
+ Python classes ObjectPropertyClass,
+ DataPropertyClass and AnnotationProperty classes.
+ preflabel: if given, add this as a skos:prefLabel annotation
+ to the new entity. If None (default), `name` will
+ be added as prefLabel if skos:prefLabel is in the ontology
+ and listed in `self.label_annotations`. Set `preflabel` to
+ False, to avoid assigning a prefLabel.
+
+ Returns:
+ the new entity.
+
+ Throws exception if name consists of more than one word, if type is not
+ one of the allowed types, or if parent is not of the correct type.
+ By default, the parent is Thing.
+
+ """
+ # pylint: disable=invalid-name
+ if " " in name:
+ raise LabelDefinitionError(
+ f"Error in label name definition '{name}': "
+ f"Label consists of more than one word."
+ )
+ parents = tuple(parent) if isinstance(parent, Iterable) else (parent,)
+ if entitytype == "class":
+ parenttype = owlready2.ThingClass
+ elif entitytype == "data_property":
+ parenttype = owlready2.DataPropertyClass
+ elif entitytype == "object_property":
+ parenttype = owlready2.ObjectPropertyClass
+ elif entitytype == "annotation_property":
+ parenttype = owlready2.AnnotationPropertyClass
+ elif entitytype in [
+ ThingClass,
+ ObjectPropertyClass,
+ DataPropertyClass,
+ AnnotationPropertyClass,
+ ]:
+ parenttype = entitytype
+ else:
+ raise EntityClassDefinitionError(
+ f"Error in entity type definition: "
+ f"'{entitytype}' is not a valid entity type."
+ )
+ for thing in parents:
+ if not isinstance(thing, parenttype):
+ raise EntityClassDefinitionError(
+ f"Error in parent definition: "
+ f"'{thing}' is not an {parenttype}."
+ )
+
+ with self:
+ entity = types.new_class(name, parents)
+
+ preflabel_iri = "http://www.w3.org/2004/02/skos/core#prefLabel"
+ if preflabel:
+ if not self.world[preflabel_iri]:
+ pref_label = self.new_annotation_property(
+ "prefLabel",
+ parent=[owlready2.AnnotationProperty],
+ )
+ pref_label.iri = preflabel_iri
+ entity.prefLabel = english(preflabel)
+ elif (
+ preflabel is None
+ and preflabel_iri in self.label_annotations
+ and self.world[preflabel_iri]
+ ):
+ entity.prefLabel = english(name)
+
+ return entity
+
+ # Method that creates new ThingClass using new_entity
+ def new_class(
+ self, name: str, parent: Union[ThingClass, Iterable]
+ ) -> ThingClass:
+ """Create and return new class.
+
+ Args:
+ name: name of the class
+ parent: parent(s) of the class
+
+ Returns:
+ the new class.
+ """
+ return self.new_entity(name, parent, "class")
+
+ # Method that creates new ObjectPropertyClass using new_entity
+ def new_object_property(
+ self, name: str, parent: Union[ObjectPropertyClass, Iterable]
+ ) -> ObjectPropertyClass:
+ """Create and return new object property.
+
+ Args:
+ name: name of the object property
+ parent: parent(s) of the object property
+
+ Returns:
+ the new object property.
+ """
+ return self.new_entity(name, parent, "object_property")
+
+ # Method that creates new DataPropertyClass using new_entity
+ def new_data_property(
+ self, name: str, parent: Union[DataPropertyClass, Iterable]
+ ) -> DataPropertyClass:
+ """Create and return new data property.
+
+ Args:
+ name: name of the data property
+ parent: parent(s) of the data property
+
+ Returns:
+ the new data property.
+ """
+ return self.new_entity(name, parent, "data_property")
+
+ # Method that creates new AnnotationPropertyClass using new_entity
+ def new_annotation_property(
+ self, name: str, parent: Union[AnnotationPropertyClass, Iterable]
+ ) -> AnnotationPropertyClass:
+ """Create and return new annotation property.
+
+ Args:
+ name: name of the annotation property
+ parent: parent(s) of the annotation property
+
+ Returns:
+ the new annotation property.
+ """
+ return self.new_entity(name, parent, "annotation_property")
+
+ def difference(self, other: owlready2.Ontology) -> set:
+ """Return a set of triples that are in this, but not in the
+ `other` ontology."""
+ # pylint: disable=invalid-name
+ s1 = set(self.get_unabbreviated_triples(blank="_:b"))
+ s2 = set(other.get_unabbreviated_triples(blank="_:b"))
+ return s1.difference(s2)
+
colon_in_label
+
+
+ property
+ writable
+
+
+¶Whether to accept colon in name-part of IRI. If true, the name cannot be prefixed.
+dir_imported
+
+
+ property
+ writable
+
+
+¶Whether to include imported ontologies in dir() listing.
+dir_label
+
+
+ property
+ writable
+
+
+¶Whether to include entity label in dir() listing.
+dir_name
+
+
+ property
+ writable
+
+
+¶Whether to include entity name in dir() listing.
+dir_preflabel
+
+
+ property
+ writable
+
+
+¶Whether to include entity prefLabel in dir() listing.
+add_label_annotation(self, iri)
+
+
+¶Adds label annotation used by get_by_label().
+ +ontopy/ontology.py
def add_label_annotation(self, iri):
+ """Adds label annotation used by get_by_label()."""
+ warnings.warn(
+ "Ontology.add_label_annotations() is deprecated. "
+ "Direct modify the `label_annotations` attribute instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ if hasattr(iri, "iri"):
+ iri = iri.iri
+ if iri not in self.label_annotations:
+ self.label_annotations.append(iri)
+
annotation_properties(self, imported=False)
+
+
+¶Returns a generator over all annotation_properties.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
imported |
+ + | if |
+ False |
+
ontopy/ontology.py
def annotation_properties(self, imported=False):
+ """Returns an generator over all annotation_properties.
+
+ Arguments:
+ imported: if `True`, entities in imported ontologies
+ are also returned.
+
+ """
+ return self._entities("annotation_properties", imported=imported)
+
classes(self, imported=False)
+
+
+¶Returns a generator over all classes.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
imported |
+ + | if |
+ False |
+
ontopy/ontology.py
def classes(self, imported=False):
+ """Returns an generator over all classes.
+
+ Arguments:
+ imported: if `True`, entities in imported ontologies
+ are also returned.
+ """
+ return self._entities("classes", imported=imported)
+
closest_common_ancestor(*classes)
+
+
+ staticmethod
+
+
+¶Returns closest_common_ancestor for the given classes.
+ +ontopy/ontology.py
@staticmethod
+def closest_common_ancestor(*classes):
+ """Returns closest_common_ancestor for the given classes."""
+ mros = [cls.mro() for cls in classes]
+ track = defaultdict(int)
+ while mros:
+ for mro in mros:
+ cur = mro.pop(0)
+ track[cur] += 1
+ if track[cur] == len(classes):
+ return cur
+ if len(mro) == 0:
+ mros.remove(mro)
+ raise EMMOntoPyException(
+ "A closest common ancestor should always exist !"
+ )
+
closest_common_ancestors(self, cls1, cls2)
+
+
+¶Returns a list with closest_common_ancestor for cls1 and cls2
+ +ontopy/ontology.py
def closest_common_ancestors(self, cls1, cls2):
+ """Returns a list with closest_common_ancestor for cls1 and cls2"""
+ distances = {}
+ for ancestor in self.common_ancestors(cls1, cls2):
+ distances[ancestor] = self.number_of_generations(
+ cls1, ancestor
+ ) + self.number_of_generations(cls2, ancestor)
+ return [
+ ancestor
+ for ancestor, distance in distances.items()
+ if distance == min(distances.values())
+ ]
+
common_ancestors(cls1, cls2)
+
+
+ staticmethod
+
+
+¶Return a list of common ancestors for cls1
and cls2
.
ontopy/ontology.py
@staticmethod
+def common_ancestors(cls1, cls2):
+ """Return a list of common ancestors for `cls1` and `cls2`."""
+ return set(cls1.ancestors()).intersection(cls2.ancestors())
+
data_properties(self, imported=False)
+
+
+¶Returns a generator over all data_properties.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
imported |
+ + | if |
+ False |
+
ontopy/ontology.py
def data_properties(self, imported=False):
+ """Returns an generator over all data_properties.
+
+ Arguments:
+ imported: if `True`, entities in imported ontologies
+ are also returned.
+ """
+ return self._entities("data_properties", imported=imported)
+
difference(self, other)
+
+
+¶Return a set of triples that are in this, but not in the
+other
ontology.
ontopy/ontology.py
def difference(self, other: owlready2.Ontology) -> set:
+ """Return a set of triples that are in this, but not in the
+ `other` ontology."""
+ # pylint: disable=invalid-name
+ s1 = set(self.get_unabbreviated_triples(blank="_:b"))
+ s2 = set(other.get_unabbreviated_triples(blank="_:b"))
+ return s1.difference(s2)
+
get_ancestors(self, classes, closest=False, generations=None, strict=True)
+
+
+¶Return ancestors of all classes in classes
.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
classes |
+ Union[List, ThingClass] |
+ class(es) for which ancestors should be returned. |
+ required | +
generations |
+ int |
+ Include this number of generations, default is all. |
+ None |
+
closest |
+ bool |
+ If True, return all ancestors up to and including the +closest common ancestor. Return all if False. |
+ False |
+
strict |
+ bool |
+ If True returns only real ancestors, i.e. |
+ True |
+
Returns:
+Type | +Description | +
---|---|
set |
+ Set of ancestors to |
+
ontopy/ontology.py
def get_ancestors(
+ self,
+ classes: "Union[List, ThingClass]",
+ closest: bool = False,
+ generations: int = None,
+ strict: bool = True,
+) -> set:
+ """Return ancestors of all classes in `classes`.
+ Args:
+ classes: class(es) for which ancestors should be returned.
+ generations: Include this number of generations, default is all.
+ closest: If True, return all ancestors up to and including the
+ closest common ancestor. Return all if False.
+ strict: If True returns only real ancestors, i.e. `classes` are
+ are not included in the returned set.
+ Returns:
+ Set of ancestors to `classes`.
+ """
+ if not isinstance(classes, Iterable):
+ classes = [classes]
+
+ ancestors = set()
+ if not classes:
+ return ancestors
+
+ def addancestors(entity, counter, subject):
+ if counter > 0:
+ for parent in entity.get_parents(strict=True):
+ subject.add(parent)
+ addancestors(parent, counter - 1, subject)
+
+ if closest:
+ if generations is not None:
+ raise ValueError(
+ "Only one of `generations` or `closest` may be specified."
+ )
+
+ closest_ancestor = self.closest_common_ancestor(*classes)
+ for cls in classes:
+ ancestors.update(
+ anc
+ for anc in cls.ancestors()
+ if closest_ancestor in anc.ancestors()
+ )
+ elif isinstance(generations, int):
+ for entity in classes:
+ addancestors(entity, generations, ancestors)
+ else:
+ ancestors.update(*(cls.ancestors() for cls in classes))
+
+ if strict:
+ return ancestors.difference(classes)
+ return ancestors
+
get_annotations(self, entity)
+
+
+¶Returns a dict with annotations for entity
. Entity may be given
+either as a ThingClass object or as a label.
ontopy/ontology.py
def get_annotations(self, entity):
+ """Returns a dict with annotations for `entity`. Entity may be given
+ either as a ThingClass object or as a label."""
+ warnings.warn(
+ "Ontology.get_annotations(entity) is deprecated. Use "
+ "entity.get_annotations() instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ if isinstance(entity, str):
+ entity = self.get_by_label(entity)
+ res = {"comment": getattr(entity, "comment", "")}
+ for annotation in self.annotation_properties():
+ res[annotation.label.first()] = [
+ obj.strip('"')
+ for _, _, obj in self.get_triples(
+ entity.storid, annotation.storid, None
+ )
+ ]
+ return res
+
get_branch(self, root, leaves=(), include_leaves=True, strict_leaves=False, exclude=None, sort=False)
+
+
+¶Returns a set with all direct and indirect subclasses of root
.
+Any subclass found in the sequence leaves
will be included in
+the returned list, but its subclasses will not. The elements
+of leaves
may be ThingClass objects or labels.
Subclasses of any subclass found in the sequence leaves
will
+be excluded from the returned list, where the elements of leaves
+may be ThingClass objects or labels.
If include_leaves
is true, the leaves are included in the returned
+list, otherwise they are not.
If strict_leaves
is true, any descendant of a leaf will be excluded
+in the returned set.
If given, exclude
may be a sequence of classes, including
+their subclasses, to exclude from the output.
If sort
is True, a list sorted according to depth and label
+will be returned instead of a set.
ontopy/ontology.py
def get_branch( # pylint: disable=too-many-arguments
+ self,
+ root,
+ leaves=(),
+ include_leaves=True,
+ strict_leaves=False,
+ exclude=None,
+ sort=False,
+):
+ """Returns a set with all direct and indirect subclasses of `root`.
+ Any subclass found in the sequence `leaves` will be included in
+ the returned list, but its subclasses will not. The elements
+ of `leaves` may be ThingClass objects or labels.
+
+ Subclasses of any subclass found in the sequence `leaves` will
+ be excluded from the returned list, where the elements of `leaves`
+ may be ThingClass objects or labels.
+
+ If `include_leaves` is true, the leaves are included in the returned
+ list, otherwise they are not.
+
+ If `strict_leaves` is true, any descendant of a leaf will be excluded
+ in the returned set.
+
+ If given, `exclude` may be a sequence of classes, including
+ their subclasses, to exclude from the output.
+
+ If `sort` is True, a list sorted according to depth and label
+ will be returned instead of a set.
+ """
+
+ def _branch(root, leaves):
+ if root not in leaves:
+ branch = {
+ root,
+ }
+ for cls in root.subclasses():
+ # Defining a branch is actually quite tricky. Consider
+ # the case:
+ #
+ # L isA R
+ # A isA L
+ # A isA R
+ #
+ # where R is the root, L is a leaf and A is a direct
+ # child of both. Logically, since A is a child of the
+ # leaf we want to skip A. But a strait forward imple-
+ # mentation will see that A is a child of the root and
+ # include it. Requireing that the R should be a strict
+ # parent of A solves this.
+ if root in cls.get_parents(strict=True):
+ branch.update(_branch(cls, leaves))
+ else:
+ branch = (
+ {
+ root,
+ }
+ if include_leaves
+ else set()
+ )
+ return branch
+
+ if isinstance(root, str):
+ root = self.get_by_label(root)
+
+ leaves = set(
+ self.get_by_label(leaf) if isinstance(leaf, str) else leaf
+ for leaf in leaves
+ )
+ leaves.discard(root)
+
+ if exclude:
+ exclude = set(
+ self.get_by_label(e) if isinstance(e, str) else e
+ for e in exclude
+ )
+ leaves.update(exclude)
+
+ branch = _branch(root, leaves)
+
+ # Exclude all descendants of any leaf
+ if strict_leaves:
+ descendants = root.descendants()
+ for leaf in leaves:
+ if leaf in descendants:
+ branch.difference_update(
+ leaf.descendants(include_self=False)
+ )
+
+ if exclude:
+ branch.difference_update(exclude)
+
+ # Sort according to depth, then by label
+ if sort:
+ branch = sorted(
+ sorted(branch, key=asstring),
+ key=lambda x: len(x.mro()),
+ )
+
+ return branch
+
get_by_label(self, label, label_annotations=None, prefix=None, imported=True, colon_in_label=None)
+
+
+¶Returns entity with label annotation label
.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
label |
+ str |
+ label so search for. + May be written as 'label' or 'prefix:label'. + get_by_label('prefix:label') == + get_by_label('label', prefix='prefix'). |
+ required | +
label_annotations |
+ str |
+ a sequence of label annotation names to look up.
+ Defaults to the |
+ None |
+
prefix |
+ str |
+ if provided, it should be the last component of + the base iri of an ontology (with trailing slash (/) or hash + (#) stripped off). The search for a matching label will be + limited to this namespace. |
+ None |
+
imported |
+ bool |
+ Whether to also look for |
+ True |
+
colon_in_label |
+ bool |
+ Whether to accept colon (:) in a label or name-part
+ of IRI. Defaults to the |
+ None |
+
If several entities have the same label, only the one which is +found first is returned. Use get_by_label_all() to get all matches.
+Note, if different prefixes are provided in the label and via
+the prefix
argument a warning will be issued and the
+prefix
argument will take precedence.
A NoSuchLabelError is raised if label
cannot be found.
ontopy/ontology.py
def get_by_label(
+ self,
+ label: str,
+ label_annotations: str = None,
+ prefix: str = None,
+ imported: bool = True,
+ colon_in_label: bool = None,
+):
+ """Returns entity with label annotation `label`.
+
+ Arguments:
+ label: label so search for.
+ May be written as 'label' or 'prefix:label'.
+ get_by_label('prefix:label') ==
+ get_by_label('label', prefix='prefix').
+ label_annotations: a sequence of label annotation names to look up.
+ Defaults to the `label_annotations` property.
+ prefix: if provided, it should be the last component of
+ the base iri of an ontology (with trailing slash (/) or hash
+ (#) stripped off). The search for a matching label will be
+ limited to this namespace.
+ imported: Whether to also look for `label` in imported ontologies.
+ colon_in_label: Whether to accept colon (:) in a label or name-part
+ of IRI. Defaults to the `colon_in_label` property of `self`.
+ Setting this true cannot be combined with `prefix`.
+
+ If several entities have the same label, only the one which is
+ found first is returned.Use get_by_label_all() to get all matches.
+
+ Note, if different prefixes are provided in the label and via
+ the `prefix` argument a warning will be issued and the
+ `prefix` argument will take precedence.
+
+ A NoSuchLabelError is raised if `label` cannot be found.
+ """
+ # pylint: disable=too-many-arguments,too-many-branches,invalid-name
+ if not isinstance(label, str):
+ raise TypeError(
+ f"Invalid label definition, must be a string: '{label}'"
+ )
+
+ if label_annotations is None:
+ label_annotations = self.label_annotations
+
+ if colon_in_label is None:
+ colon_in_label = self._colon_in_label
+ if colon_in_label:
+ if prefix:
+ raise ValueError(
+ "`prefix` cannot be combined with `colon_in_label`"
+ )
+ else:
+ splitlabel = label.split(":", 1)
+ if len(splitlabel) == 2 and not splitlabel[1].startswith("//"):
+ label = splitlabel[1]
+ if prefix and prefix != splitlabel[0]:
+ warnings.warn(
+ f"Prefix given both as argument ({prefix}) "
+ f"and in label ({splitlabel[0]}). "
+ "Prefix given in argument takes precedence. "
+ )
+ if not prefix:
+ prefix = splitlabel[0]
+
+ if prefix:
+ entityset = self.get_by_label_all(
+ label,
+ label_annotations=label_annotations,
+ prefix=prefix,
+ )
+ if len(entityset) == 1:
+ return entityset.pop()
+ if len(entityset) > 1:
+ raise AmbiguousLabelError(
+ f"Several entities have the same label '{label}' "
+ f"with prefix '{prefix}'."
+ )
+ raise NoSuchLabelError(
+ f"No label annotations matches for '{label}' "
+ f"with prefix '{prefix}'."
+ )
+
+ # Label is a full IRI
+ entity = self.world[label]
+ if entity:
+ return entity
+
+ get_triples = (
+ self.world._get_data_triples_spod_spod
+ if imported
+ else self._get_data_triples_spod_spod
+ )
+
+ for storid in self._to_storids(label_annotations):
+ for s, _, _, _ in get_triples(None, storid, label, None):
+ return self.world[self._unabbreviate(s)]
+
+ # Special labels
+ if self._special_labels and label in self._special_labels:
+ return self._special_labels[label]
+
+ # Check if label is a name under base_iri
+ entity = self.world[self.base_iri + label]
+ if entity:
+ return entity
+
+ # Check label is the name of an entity
+ for entity in self.get_entities(imported=imported):
+ if label == entity.name:
+ return entity
+
+ raise NoSuchLabelError(f"No label annotations matches '{label}'")
+
get_by_label_all(self, label, label_annotations=None, prefix=None, exact_match=False)
+
+
+¶Returns set of entities with label annotation label
.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
label |
+ label to search for.
+ May be written as 'label' or 'prefix:label'. Wildcard matching
+ using glob pattern is also supported if |
+ required | +
label_annotations |
+ + | a sequence of label annotation names to look up.
+ Defaults to the |
+ None |
+
prefix |
+ + | if provided, it should be the last component of + the base iri of an ontology (with trailing slash (/) or hash + (#) stripped off). The search for a matching label will be + limited to this namespace. |
+ None |
+
exact_match |
+ + | Do not treat "*" and brackets as special characters + when matching. May be useful if your ontology has labels + containing such labels. |
+ False |
+
Returns:
+Type | +Description | +
---|---|
Set[Optional[owlready2.entity.EntityClass]] |
+ Set of all matching entities or an empty set if no matches +could be found. |
+
ontopy/ontology.py
def get_by_label_all(
+ self,
+ label,
+ label_annotations=None,
+ prefix=None,
+ exact_match=False,
+) -> "Set[Optional[owlready2.entity.EntityClass]]":
+ """Returns set of entities with label annotation `label`.
+
+ Arguments:
+ label: label so search for.
+ May be written as 'label' or 'prefix:label'. Wildcard matching
+ using glob pattern is also supported if `exact_match` is set to
+ false.
+ label_annotations: a sequence of label annotation names to look up.
+ Defaults to the `label_annotations` property.
+ prefix: if provided, it should be the last component of
+ the base iri of an ontology (with trailing slash (/) or hash
+ (#) stripped off). The search for a matching label will be
+ limited to this namespace.
+ exact_match: Do not treat "*" and brackets as special characters
+ when matching. May be useful if your ontology has labels
+ containing such labels.
+
+ Returns:
+ Set of all matching entities or an empty set if no matches
+ could be found.
+ """
+ if not isinstance(label, str):
+ raise TypeError(
+ f"Invalid label definition, " f"must be a string: {label!r}"
+ )
+ if " " in label:
+ raise ValueError(
+ f"Invalid label definition, {label!r} contains spaces."
+ )
+
+ if label_annotations is None:
+ label_annotations = self.label_annotations
+
+ entities = set()
+
+ # Check label annotations
+ if exact_match:
+ for storid in self._to_storids(label_annotations):
+ entities.update(
+ self.world._get_by_storid(s)
+ for s, _, _ in self.world._get_data_triples_spod_spod(
+ None, storid, str(label), None
+ )
+ )
+ else:
+ for storid in self._to_storids(label_annotations):
+ label_entity = self._unabbreviate(storid)
+ key = (
+ label_entity.name
+ if hasattr(label_entity, "name")
+ else label_entity
+ )
+ entities.update(self.world.search(**{key: label}))
+
+ if self._special_labels and label in self._special_labels:
+ entities.update(self._special_labels[label])
+
+ # Check name-part of IRI
+ if exact_match:
+ entities.update(
+ ent for ent in self.get_entities() if ent.name == str(label)
+ )
+ else:
+ matches = fnmatch.filter(
+ (ent.name for ent in self.get_entities()), label
+ )
+ entities.update(
+ ent for ent in self.get_entities() if ent.name in matches
+ )
+
+ if prefix:
+ return set(
+ ent
+ for ent in entities
+ if ent.namespace.ontology.prefix == prefix
+ )
+ return entities
+
get_descendants(self, classes, generations=None, common=False)
+
+
+¶Return descendants/subclasses of all classes in classes
.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
classes |
+ Union[List, ThingClass] |
+ class(es) for which descendants are desired. |
+ required | +
common |
+ bool |
+ whether to only return descendants common to all classes. |
+ False |
+
generations |
+ int |
+ Include this number of generations, default is all. |
+ None |
+
Returns:
+Type | +Description | +
---|---|
set |
+ A set of descendants for given number of generations. +If 'common'=True, the common descendants are returned +within the specified number of generations. +'generations' defaults to all. |
+
ontopy/ontology.py
def get_descendants(
+ self,
+ classes: "Union[List, ThingClass]",
+ generations: int = None,
+ common: bool = False,
+) -> set:
+ """Return descendants/subclasses of all classes in `classes`.
+ Args:
+ classes: class(es) for which descendants are desired.
+ common: whether to only return descendants common to all classes.
+ generations: Include this number of generations, default is all.
+ Returns:
+ A set of descendants for given number of generations.
+ If 'common'=True, the common descendants are returned
+ within the specified number of generations.
+ 'generations' defaults to all.
+ """
+
+ if not isinstance(classes, Iterable):
+ classes = [classes]
+
+ descendants = {name: [] for name in classes}
+
+ def _children_recursively(num, newentity, parent, descendants):
+ """Helper function to get all children up to generation."""
+ for child in self.get_children_of(newentity):
+ descendants[parent].append(child)
+ if num < generations:
+ _children_recursively(num + 1, child, parent, descendants)
+
+ if generations == 0:
+ return set()
+
+ if not generations:
+ for entity in classes:
+ descendants[entity] = entity.descendants()
+ # only include proper descendants
+ descendants[entity].remove(entity)
+ else:
+ for entity in classes:
+ _children_recursively(1, entity, entity, descendants)
+
+ results = descendants.values()
+ if common is True:
+ return set.intersection(*map(set, results))
+ return set(flatten(results))
+
get_entities(self, imported=True, classes=True, individuals=True, object_properties=True, data_properties=True, annotation_properties=True)
+
+
+¶Return a generator over (optionally) all classes, individuals, +object_properties, data_properties and annotation_properties.
+If imported
is True
, entities in imported ontologies will also
+be included.
ontopy/ontology.py
def get_entities( # pylint: disable=too-many-arguments
+ self,
+ imported=True,
+ classes=True,
+ individuals=True,
+ object_properties=True,
+ data_properties=True,
+ annotation_properties=True,
+):
+ """Return a generator over (optionally) all classes, individuals,
+ object_properties, data_properties and annotation_properties.
+
+ If `imported` is `True`, entities in imported ontologies will also
+ be included.
+ """
+ generator = []
+ if classes:
+ generator.append(self.classes(imported))
+ if individuals:
+ generator.append(self.individuals(imported))
+ if object_properties:
+ generator.append(self.object_properties(imported))
+ if data_properties:
+ generator.append(self.data_properties(imported))
+ if annotation_properties:
+ generator.append(self.annotation_properties(imported))
+ for entity in itertools.chain(*generator):
+ yield entity
+
get_graph(self, **kwargs)
+
+
+¶Returns a new graph object. See emmo.graph.OntoGraph.
+Note that this method requires the Python graphviz package.
+ +ontopy/ontology.py
def get_graph(self, **kwargs):
+ """Returns a new graph object. See emmo.graph.OntoGraph.
+
+ Note that this method requires the Python graphviz package.
+ """
+ # pylint: disable=import-outside-toplevel,cyclic-import
+ from ontopy.graph import OntoGraph
+
+ return OntoGraph(self, **kwargs)
+
get_imported_ontologies(self, recursive=False)
+
+
+¶Return a list with imported ontologies.
+If recursive
is True
, ontologies imported by imported ontologies
+are also returned.
ontopy/ontology.py
def get_imported_ontologies(self, recursive=False):
+ """Return a list with imported ontologies.
+
+ If `recursive` is `True`, ontologies imported by imported ontologies
+ are also returned.
+ """
+
+ def rec_imported(onto):
+ for ontology in onto.imported_ontologies:
+ if ontology not in imported:
+ imported.add(ontology)
+ rec_imported(ontology)
+
+ if recursive:
+ imported = set()
+ rec_imported(self)
+ return list(imported)
+
+ return self.imported_ontologies
+
get_relations(self)
+
+
+¶Returns a generator for all relations.
+ +ontopy/ontology.py
def get_relations(self):
+ """Returns a generator for all relations."""
+ warnings.warn(
+ "Ontology.get_relations() is deprecated. Use "
+ "onto.object_properties() instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return self.object_properties()
+
get_root_classes(self, imported=False)
+
+
+¶Returns a list of root classes.
+ +ontopy/ontology.py
def get_root_classes(self, imported=False):
+ """Returns a list or root classes."""
+ return [
+ cls
+ for cls in self.classes(imported=imported)
+ if not cls.ancestors().difference(set([cls, owlready2.Thing]))
+ ]
+
get_root_data_properties(self, imported=False)
+
+
+¶Returns a list of root data properties.
+ +ontopy/ontology.py
def get_root_data_properties(self, imported=False):
+ """Returns a list of root object properties."""
+ props = set(self.data_properties(imported=imported))
+ return [p for p in props if not props.intersection(p.is_a)]
+
get_root_object_properties(self, imported=False)
+
+
+¶Returns a list of root object properties.
+ +ontopy/ontology.py
def get_root_object_properties(self, imported=False):
+ """Returns a list of root object properties."""
+ props = set(self.object_properties(imported=imported))
+ return [p for p in props if not props.intersection(p.is_a)]
+
get_roots(self, imported=False)
+
+
+¶Returns all class, object_property and data_property roots.
+ +ontopy/ontology.py
def get_roots(self, imported=False):
+ """Returns all class, object_property and data_property roots."""
+ roots = self.get_root_classes(imported=imported)
+ roots.extend(self.get_root_object_properties(imported=imported))
+ roots.extend(self.get_root_data_properties(imported=imported))
+ return roots
+
get_unabbreviated_triples(self, subject=None, predicate=None, obj=None, blank=None)
+
+
+¶Returns all matching triples unabbreviated.
+If blank
is given, it will be used to represent blank nodes.
ontopy/ontology.py
def get_unabbreviated_triples(
+ self, subject=None, predicate=None, obj=None, blank=None
+):
+ """Returns all matching triples unabbreviated.
+
+ If `blank` is given, it will be used to represent blank nodes.
+ """
+ # pylint: disable=invalid-name
+ return _get_unabbreviated_triples(
+ self, subject=subject, predicate=predicate, obj=obj, blank=blank
+ )
+
get_version(self, as_iri=False)
+
+
+¶Returns the version number of the ontology as inferred from the +owl:versionIRI tag or, if owl:versionIRI is not found, from +owl:versionINFO.
+If as_iri
is True, the full versionIRI is returned.
ontopy/ontology.py
def get_version(self, as_iri=False) -> str:
+ """Returns the version number of the ontology as inferred from the
+ owl:versionIRI tag or, if owl:versionIRI is not found, from
+ owl:versionINFO.
+
+ If `as_iri` is True, the full versionIRI is returned.
+ """
+ version_iri_storid = self.world._abbreviate(
+ "http://www.w3.org/2002/07/owl#versionIRI"
+ )
+ tokens = self.get_triples(s=self.storid, p=version_iri_storid)
+ if (not tokens) and (as_iri is True):
+ raise TypeError(
+ "No owl:versionIRI "
+ f"in Ontology {self.base_iri!r}. "
+ "Search for owl:versionInfo with as_iri=False"
+ )
+ if tokens:
+ _, _, obj = tokens[0]
+ version_iri = self.world._unabbreviate(obj)
+ if as_iri:
+ return version_iri
+ return infer_version(self.base_iri, version_iri)
+
+ version_info_storid = self.world._abbreviate(
+ "http://www.w3.org/2002/07/owl#versionInfo"
+ )
+ tokens = self.get_triples(s=self.storid, p=version_info_storid)
+ if not tokens:
+ raise TypeError(
+ "No versionIRI or versionInfo " f"in Ontology {self.base_iri!r}"
+ )
+ _, _, version_info = tokens[0]
+ return version_info.split("^^")[0].strip('"')
+
get_wu_palmer_measure(self, cls1, cls2)
+
+
+¶Return Wu-Palmer measure for semantic similarity.
+Returns Wu-Palmer measure for semantic similarity between +two concepts. +Wu, Palmer; ACL 94: Proceedings of the 32nd annual meeting on +Association for Computational Linguistics, June 1994.
+ +ontopy/ontology.py
def get_wu_palmer_measure(self, cls1, cls2):
+ """Return Wu-Palmer measure for semantic similarity.
+
+ Returns Wu-Palmer measure for semantic similarity between
+ two concepts.
+ Wu, Palmer; ACL 94: Proceedings of the 32nd annual meeting on
+ Association for Computational Linguistics, June 1994.
+ """
+ cca = self.closest_common_ancestor(cls1, cls2)
+ ccadepth = self.number_of_generations(cca, self.Thing)
+ generations1 = self.number_of_generations(cls1, cca)
+ generations2 = self.number_of_generations(cls2, cca)
+ return 2 * ccadepth / (generations1 + generations2 + 2 * ccadepth)
+
individuals(self, imported=False)
+
+
+¶Returns an generator over all individuals.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
imported |
+ + | if |
+ False |
+
ontopy/ontology.py
def individuals(self, imported=False):
+ """Returns an generator over all individuals.
+
+ Arguments:
+ imported: if `True`, entities in imported ontologies
+ are also returned.
+ """
+ return self._entities("individuals", imported=imported)
+
is_defined(self, entity)
+
+
+¶Returns true if the entity is a defined class.
+Deprecated, use the is_defined
property of the classes
+(ThingClass subclasses) instead.
ontopy/ontology.py
def is_defined(self, entity):
+ """Returns true if the entity is a defined class.
+
+ Deprecated, use the `is_defined` property of the classes
+ (ThingClass subclasses) instead.
+ """
+ warnings.warn(
+ "This method is deprecated. Use the `is_defined` property of "
+ "the classes instad.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ if isinstance(entity, str):
+ entity = self.get_by_label(entity)
+ return hasattr(entity, "equivalent_to") and bool(entity.equivalent_to)
+
is_individual(self, entity)
+
+
+¶Returns true if entity is an individual.
+ +ontopy/ontology.py
def is_individual(self, entity):
+ """Returns true if entity is an individual."""
+ if isinstance(entity, str):
+ entity = self.get_by_label(entity)
+ return isinstance(entity, owlready2.Thing)
+
load(self, only_local=False, filename=None, format=None, reload=None, reload_if_newer=False, url_from_catalog=None, catalog_file='catalog-v001.xml', emmo_based=True, prefix=None, prefix_emmo=None, **kwargs)
+
+
+¶Load the ontology.
+bool
+Whether to only read local files. This requires that you +have appended the path to the ontology to owlready2.onto_path.
+str
+Path to file to load the ontology from. Defaults to base_iri
+provided to get_ontology().
str
+Format of filename
. Default is inferred from filename
+extension.
bool
+Whether to reload the ontology if it is already loaded.
+bool
+Whether to reload the ontology if the source has changed since +last time it was loaded.
+bool | None
+Whether to use catalog file to resolve the location of base_iri
.
+If None, the catalog file is used if it exists in the same
+directory as filename
.
str
+Name of Protègè catalog file in the same folder as the
+ontology. This option is used together with only_local
and
+defaults to "catalog-v001.xml".
bool
+Whether this is an EMMO-based ontology or not, default True
.
prefix: defaults to self.get_namespace.name if
+bool, default None. If emmo_based is True it
+defaults to True and sets the prefix of all imported ontologies +with base_iri starting with 'http://emmo.info/emmo' to emmo
+Kwargs
+Additional keyword arguments are passed on to +owlready2.Ontology.load().
+ontopy/ontology.py
def load( # pylint: disable=too-many-arguments,arguments-renamed
+ self,
+ only_local=False,
+ filename=None,
+ format=None, # pylint: disable=redefined-builtin
+ reload=None,
+ reload_if_newer=False,
+ url_from_catalog=None,
+ catalog_file="catalog-v001.xml",
+ emmo_based=True,
+ prefix=None,
+ prefix_emmo=None,
+ **kwargs,
+):
+ """Load the ontology.
+
+ Arguments
+ ---------
+ only_local: bool
+ Whether to only read local files. This requires that you
+ have appended the path to the ontology to owlready2.onto_path.
+ filename: str
+ Path to file to load the ontology from. Defaults to `base_iri`
+ provided to get_ontology().
+ format: str
+ Format of `filename`. Default is inferred from `filename`
+ extension.
+ reload: bool
+ Whether to reload the ontology if it is already loaded.
+ reload_if_newer: bool
+ Whether to reload the ontology if the source has changed since
+ last time it was loaded.
+ url_from_catalog: bool | None
+ Whether to use catalog file to resolve the location of `base_iri`.
+ If None, the catalog file is used if it exists in the same
+ directory as `filename`.
+ catalog_file: str
+ Name of Protègè catalog file in the same folder as the
+ ontology. This option is used together with `only_local` and
+ defaults to "catalog-v001.xml".
+ emmo_based: bool
+ Whether this is an EMMO-based ontology or not, default `True`.
+ prefix: defaults to self.get_namespace.name if
+ prefix_emmo: bool, default None. If emmo_based is True it
+ defaults to True and sets the prefix of all imported ontologies
+ with base_iri starting with 'http://emmo.info/emmo' to emmo
+ kwargs:
+ Additional keyword arguments are passed on to
+ owlready2.Ontology.load().
+ """
+ # TODO: make sure that `only_local` argument is respected...
+
+ if self.loaded:
+ return self
+ self._load(
+ only_local=only_local,
+ filename=filename,
+ format=format,
+ reload=reload,
+ reload_if_newer=reload_if_newer,
+ url_from_catalog=url_from_catalog,
+ catalog_file=catalog_file,
+ **kwargs,
+ )
+
+ # Enable optimised search by get_by_label()
+ if self._special_labels is None and emmo_based:
+ top = self.world["http://www.w3.org/2002/07/owl#topObjectProperty"]
+ self._special_labels = {
+ "Thing": owlready2.Thing,
+ "Nothing": owlready2.Nothing,
+ "topObjectProperty": top,
+ "owl:Thing": owlready2.Thing,
+ "owl:Nothing": owlready2.Nothing,
+ "owl:topObjectProperty": top,
+ }
+ # set prefix if another prefix is desired
+ # if we do this, shouldn't we make the name of all
+ # entities of the given ontology to the same?
+ if prefix:
+ self.prefix = prefix
+ else:
+ self.prefix = self.name
+
+ if emmo_based and prefix_emmo is None:
+ prefix_emmo = True
+ if prefix_emmo:
+ self.set_common_prefix()
+
+ return self
+
new_annotation_property(self, name, parent)
+
+
+¶Create and return new annotation property.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
name |
+ str |
+ name of the annotation property |
+ required | +
parent |
+ Union[owlready2.annotation.AnnotationPropertyClass, collections.abc.Iterable] |
+ parent(s) of the annotation property |
+ required | +
Returns:
+Type | +Description | +
---|---|
AnnotationPropertyClass |
+ the new annotation property. |
+
ontopy/ontology.py
def new_annotation_property(
+ self, name: str, parent: Union[AnnotationPropertyClass, Iterable]
+) -> AnnotationPropertyClass:
+ """Create and return new annotation property.
+
+ Args:
+ name: name of the annotation property
+ parent: parent(s) of the annotation property
+
+ Returns:
+ the new annotation property.
+ """
+ return self.new_entity(name, parent, "annotation_property")
+
new_class(self, name, parent)
+
+
+¶Create and return new class.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
name |
+ str |
+ name of the class |
+ required | +
parent |
+ Union[owlready2.entity.ThingClass, collections.abc.Iterable] |
+ parent(s) of the class |
+ required | +
Returns:
+Type | +Description | +
---|---|
ThingClass |
+ the new class. |
+
ontopy/ontology.py
def new_class(
+ self, name: str, parent: Union[ThingClass, Iterable]
+) -> ThingClass:
+ """Create and return new class.
+
+ Args:
+ name: name of the class
+ parent: parent(s) of the class
+
+ Returns:
+ the new class.
+ """
+ return self.new_entity(name, parent, "class")
+
new_data_property(self, name, parent)
+
+
+¶Create and return new data property.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
name |
+ str |
+ name of the data property |
+ required | +
parent |
+ Union[owlready2.prop.DataPropertyClass, collections.abc.Iterable] |
+ parent(s) of the data property |
+ required | +
Returns:
+Type | +Description | +
---|---|
DataPropertyClass |
+ the new data property. |
+
ontopy/ontology.py
def new_data_property(
+ self, name: str, parent: Union[DataPropertyClass, Iterable]
+) -> DataPropertyClass:
+ """Create and return new data property.
+
+ Args:
+ name: name of the data property
+ parent: parent(s) of the data property
+
+ Returns:
+ the new data property.
+ """
+ return self.new_entity(name, parent, "data_property")
+
new_entity(self, name, parent, entitytype='class', preflabel=None)
+
+
+¶Create and return new entity
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
name |
+ str |
+ name of the entity |
+ required | +
parent |
+ Union[owlready2.entity.ThingClass, owlready2.prop.ObjectPropertyClass, owlready2.prop.DataPropertyClass, owlready2.annotation.AnnotationPropertyClass, collections.abc.Iterable] |
+ parent(s) of the entity |
+ required | +
entitytype |
+ Union[str, owlready2.entity.ThingClass, owlready2.prop.ObjectPropertyClass, owlready2.prop.DataPropertyClass, owlready2.annotation.AnnotationPropertyClass] |
+ type of the entity, +default is 'class' (str) 'ThingClass' (owlready2 Python class). +Other options +are 'data_property', 'object_property', +'annotation_property' (strings) or the +Python classes ObjectPropertyClass, +DataPropertyClass and AnnotationProperty classes. |
+ 'class' |
+
preflabel |
+ Optional[str] |
+ if given, add this as a skos:prefLabel annotation
+to the new entity. If None (default), |
+ None |
+
Returns:
+Type | +Description | +
---|---|
Union[owlready2.entity.ThingClass, owlready2.prop.ObjectPropertyClass, owlready2.prop.DataPropertyClass, owlready2.annotation.AnnotationPropertyClass] |
+ the new entity. |
+
Throws exception if name consists of more than one word, if type is not +one of the allowed types, or if parent is not of the correct type. +By default, the parent is Thing.
+ +ontopy/ontology.py
def new_entity(
+ self,
+ name: str,
+ parent: Union[
+ ThingClass,
+ ObjectPropertyClass,
+ DataPropertyClass,
+ AnnotationPropertyClass,
+ Iterable,
+ ],
+ entitytype: Optional[
+ Union[
+ str,
+ ThingClass,
+ ObjectPropertyClass,
+ DataPropertyClass,
+ AnnotationPropertyClass,
+ ]
+ ] = "class",
+ preflabel: Optional[str] = None,
+) -> Union[
+ ThingClass,
+ ObjectPropertyClass,
+ DataPropertyClass,
+ AnnotationPropertyClass,
+]:
+ """Create and return new entity
+
+ Args:
+ name: name of the entity
+ parent: parent(s) of the entity
+ entitytype: type of the entity,
+ default is 'class' (str) 'ThingClass' (owlready2 Python class).
+ Other options
+ are 'data_property', 'object_property',
+ 'annotation_property' (strings) or the
+ Python classes ObjectPropertyClass,
+ DataPropertyClass and AnnotationProperty classes.
+ preflabel: if given, add this as a skos:prefLabel annotation
+ to the new entity. If None (default), `name` will
+ be added as prefLabel if skos:prefLabel is in the ontology
+ and listed in `self.label_annotations`. Set `preflabel` to
+ False, to avoid assigning a prefLabel.
+
+ Returns:
+ the new entity.
+
+ Throws exception if name consists of more than one word, if type is not
+ one of the allowed types, or if parent is not of the correct type.
+ By default, the parent is Thing.
+
+ """
+ # pylint: disable=invalid-name
+ if " " in name:
+ raise LabelDefinitionError(
+ f"Error in label name definition '{name}': "
+ f"Label consists of more than one word."
+ )
+ parents = tuple(parent) if isinstance(parent, Iterable) else (parent,)
+ if entitytype == "class":
+ parenttype = owlready2.ThingClass
+ elif entitytype == "data_property":
+ parenttype = owlready2.DataPropertyClass
+ elif entitytype == "object_property":
+ parenttype = owlready2.ObjectPropertyClass
+ elif entitytype == "annotation_property":
+ parenttype = owlready2.AnnotationPropertyClass
+ elif entitytype in [
+ ThingClass,
+ ObjectPropertyClass,
+ DataPropertyClass,
+ AnnotationPropertyClass,
+ ]:
+ parenttype = entitytype
+ else:
+ raise EntityClassDefinitionError(
+ f"Error in entity type definition: "
+ f"'{entitytype}' is not a valid entity type."
+ )
+ for thing in parents:
+ if not isinstance(thing, parenttype):
+ raise EntityClassDefinitionError(
+ f"Error in parent definition: "
+ f"'{thing}' is not an {parenttype}."
+ )
+
+ with self:
+ entity = types.new_class(name, parents)
+
+ preflabel_iri = "http://www.w3.org/2004/02/skos/core#prefLabel"
+ if preflabel:
+ if not self.world[preflabel_iri]:
+ pref_label = self.new_annotation_property(
+ "prefLabel",
+ parent=[owlready2.AnnotationProperty],
+ )
+ pref_label.iri = preflabel_iri
+ entity.prefLabel = english(preflabel)
+ elif (
+ preflabel is None
+ and preflabel_iri in self.label_annotations
+ and self.world[preflabel_iri]
+ ):
+ entity.prefLabel = english(name)
+
+ return entity
+
new_object_property(self, name, parent)
+
+
+¶Create and return new object property.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
name |
+ str |
+ name of the object property |
+ required | +
parent |
+ Union[owlready2.prop.ObjectPropertyClass, collections.abc.Iterable] |
+ parent(s) of the object property |
+ required | +
Returns:
+Type | +Description | +
---|---|
ObjectPropertyClass |
+ the new object property. |
+
ontopy/ontology.py
def new_object_property(
+ self, name: str, parent: Union[ObjectPropertyClass, Iterable]
+) -> ObjectPropertyClass:
+ """Create and return new object property.
+
+ Args:
+ name: name of the object property
+ parent: parent(s) of the object property
+
+ Returns:
+ the new object property.
+ """
+ return self.new_entity(name, parent, "object_property")
+
number_of_generations(self, descendant, ancestor)
+
+
+¶Return shortest distance from ancestor to descendant
+ +ontopy/ontology.py
def number_of_generations(self, descendant, ancestor):
+ """Return shortest distance from ancestor to descendant"""
+ if ancestor not in descendant.ancestors():
+ raise ValueError("Descendant is not a descendant of ancestor")
+ return self._number_of_generations(descendant, ancestor, 0)
+
object_properties(self, imported=False)
+
+
+¶Returns an generator over all object_properties.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
imported |
+ + | if |
+ False |
+
ontopy/ontology.py
def object_properties(self, imported=False):
+ """Returns an generator over all object_properties.
+
+ Arguments:
+ imported: if `True`, entities in imported ontologies
+ are also returned.
+ """
+ return self._entities("object_properties", imported=imported)
+
remove_label_annotation(self, iri)
+
+
+¶Removes label annotation used by get_by_label().
+ +ontopy/ontology.py
def remove_label_annotation(self, iri):
+ """Removes label annotation used by get_by_label()."""
+ warnings.warn(
+ "Ontology.remove_label_annotations() is deprecated. "
+ "Direct modify the `label_annotations` attribute instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ if hasattr(iri, "iri"):
+ iri = iri.iri
+ try:
+ self.label_annotations.remove(iri)
+ except ValueError:
+ pass
+
rename_entities(self, annotations=('prefLabel', 'label', 'altLabel'))
+
+
+¶Set name
of all entities to the first non-empty annotation in
+annotations
.
Warning, this method changes all IRIs in the ontology. However, +it may be useful to make the ontology more readable and to work +with it together with a triple store.
+ +ontopy/ontology.py
def rename_entities(
+ self,
+ annotations=("prefLabel", "label", "altLabel"),
+):
+ """Set `name` of all entities to the first non-empty annotation in
+ `annotations`.
+
+ Warning, this method changes all IRIs in the ontology. However,
+ it may be useful to make the ontology more readable and to work
+ with it together with a triple store.
+ """
+ for entity in self.get_entities():
+ for annotation in annotations:
+ if hasattr(entity, annotation):
+ name = getattr(entity, annotation).first()
+ if name:
+ entity.name = name
+ break
+
save(self, filename=None, format=None, dir='.', mkdir=False, overwrite=False, recursive=False, squash=False, write_catalog_file=False, append_catalog=False, catalog_file='catalog-v001.xml')
+
+
+¶Writes the ontology to file.
+None | str | Path
+Name of file to write to. If None, it defaults to the name
+of the ontology with format
as file extension.
str
+Output format. The default is to infer it from filename
.
str | Path
+If filename
is a relative path, it is a relative path to dir
.
bool
+Whether to create output directory if it does not exists.
+bool
+If true and filename
exists, remove the existing file before
+saving. The default is to append to an existing ontology.
bool
+Whether to save imported ontologies recursively. This is
+commonly combined with filename=None
, dir
and mkdir
.
bool
+If true, rdflib will be used to save the current ontology
+together with all its sub-ontologies into filename
.
+It make no sense to combine this with recursive
.
bool
+Whether to also write a catalog file to disk.
+bool
+Whether to append to an existing catalog file.
+str | Path
+Name of catalog file. If not an absolute path, it is prepended
+to dir
.
ontopy/ontology.py
def save(
+ self,
+ filename=None,
+ format=None,
+ dir=".",
+ mkdir=False,
+ overwrite=False,
+ recursive=False,
+ squash=False,
+ write_catalog_file=False,
+ append_catalog=False,
+ catalog_file="catalog-v001.xml",
+):
+ """Writes the ontology to file.
+
+ Parameters
+ ----------
+ filename: None | str | Path
+ Name of file to write to. If None, it defaults to the name
+ of the ontology with `format` as file extension.
+ format: str
+ Output format. The default is to infer it from `filename`.
+ dir: str | Path
+ If `filename` is a relative path, it is a relative path to `dir`.
+ mkdir: bool
+ Whether to create output directory if it does not exists.
+ owerwrite: bool
+ If true and `filename` exists, remove the existing file before
+ saving. The default is to append to an existing ontology.
+ recursive: bool
+ Whether to save imported ontologies recursively. This is
+ commonly combined with `filename=None`, `dir` and `mkdir`.
+ squash: bool
+ If true, rdflib will be used to save the current ontology
+ together with all its sub-ontologies into `filename`.
+ It make no sense to combine this with `recursive`.
+ write_catalog_file: bool
+ Whether to also write a catalog file to disk.
+ append_catalog: bool
+ Whether to append to an existing catalog file.
+ catalog_file: str | Path
+ Name of catalog file. If not an absolute path, it is prepended
+ to `dir`.
+ """
+ # pylint: disable=redefined-builtin,too-many-arguments
+ # pylint: disable=too-many-statements,too-many-branches
+ # pylint: disable=too-many-locals,arguments-renamed
+ if not _validate_installed_version(
+ package="rdflib", min_version="6.0.0"
+ ) and format == FMAP.get("ttl", ""):
+ from rdflib import ( # pylint: disable=import-outside-toplevel
+ __version__ as __rdflib_version__,
+ )
+
+ warnings.warn(
+ IncompatibleVersion(
+ "To correctly convert to Turtle format, rdflib must be "
+ "version 6.0.0 or greater, however, the detected rdflib "
+ "version used by your Python interpreter is "
+ f"{__rdflib_version__!r}. For more information see the "
+ "'Known issues' section of the README."
+ )
+ )
+
+ revmap = {value: key for key, value in FMAP.items()}
+ if filename is None:
+ if format:
+ fmt = revmap.get(format, format)
+ filename = f"{self.name}.{fmt}"
+ else:
+ raise TypeError("`filename` and `format` cannot both be None.")
+ filename = os.path.join(dir, filename)
+ dir = Path(filename).resolve().parent
+
+ if mkdir:
+ outdir = Path(filename).parent.resolve()
+ if not outdir.exists():
+ outdir.mkdir(parents=True)
+
+ if not format:
+ format = guess_format(filename, fmap=FMAP)
+ fmt = revmap.get(format, format)
+
+ if overwrite and filename and os.path.exists(filename):
+ os.remove(filename)
+
+ EMMO = rdflib.Namespace( # pylint:disable=invalid-name
+ "http://emmo.info/emmo#"
+ )
+
+ if recursive:
+ if squash:
+ raise ValueError(
+ "`recursive` and `squash` should not both be true"
+ )
+ layout = directory_layout(self)
+
+ for onto, path in layout.items():
+ fname = Path(dir) / f"{path}.{fmt}"
+ onto.save(
+ filename=fname,
+ format=format,
+ dir=dir,
+ mkdir=mkdir,
+ overwrite=overwrite,
+ recursive=False,
+ squash=False,
+ write_catalog_file=False,
+ )
+
+ if write_catalog_file:
+ catalog_files = set()
+ irimap = {}
+ for onto, path in layout.items():
+ irimap[
+ onto.get_version(as_iri=True)
+ ] = f"{dir}/{path}.{fmt}"
+ catalog_files.add(Path(path).parent / catalog_file)
+
+ for catfile in catalog_files:
+ write_catalog(
+ irimap.copy(),
+ output=catfile,
+ directory=dir,
+ append=append_catalog,
+ )
+
+ elif write_catalog_file:
+ write_catalog(
+ {self.get_version(as_iri=True): filename},
+ output=catalog_file,
+ directory=dir,
+ append=append_catalog,
+ )
+
+ if squash:
+ from rdflib import ( # pylint:disable=import-outside-toplevel
+ URIRef,
+ RDF,
+ OWL,
+ )
+
+ graph = self.world.as_rdflib_graph()
+ graph.namespace_manager.bind("emmo", EMMO)
+
+ # Remove anonymous namespace and imports
+ graph.remove((URIRef("http://anonymous"), RDF.type, OWL.Ontology))
+ imports = list(graph.triples((None, OWL.imports, None)))
+ for triple in imports:
+ graph.remove(triple)
+
+ graph.serialize(destination=filename, format=format)
+ elif format in OWLREADY2_FORMATS:
+ super().save(file=filename, format=fmt)
+ else:
+ # The try-finally clause is needed for cleanup and because
+ # we have to provide delete=False to NamedTemporaryFile
+ # since Windows does not allow to reopen an already open
+ # file.
+ try:
+ with tempfile.NamedTemporaryFile(
+ suffix=".owl", delete=False
+ ) as handle:
+ tmpfile = handle.name
+ super().save(tmpfile, format="ntriples")
+ graph = rdflib.Graph()
+ graph.parse(tmpfile, format="ntriples")
+ graph.serialize(destination=filename, format=format)
+ finally:
+ os.remove(tmpfile)
+
set_common_prefix(self, iri_base='http://emmo.info/emmo', prefix='emmo', visited=None)
+
+
+¶Set a common prefix for all imported ontologies +with the same first part of the base_iri.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
iri_base |
+ str |
+ The start of the base_iri to look for. Defaults to +the emmo base_iri http://emmo.info/emmo |
+ 'http://emmo.info/emmo' |
+
prefix |
+ str |
+ the desired prefix. Defaults to emmo. |
+ 'emmo' |
+
visited |
+ Optional[Set] |
+ Ontologies to skip. Only intended for internal use. |
+ None |
+
ontopy/ontology.py
def set_common_prefix(
+ self,
+ iri_base: str = "http://emmo.info/emmo",
+ prefix: str = "emmo",
+ visited: "Optional[Set]" = None,
+) -> None:
+ """Set a common prefix for all imported ontologies
+ with the same first part of the base_iri.
+
+ Args:
+ iri_base: The start of the base_iri to look for. Defaults to
+ the emmo base_iri http://emmo.info/emmo
+ prefix: the desired prefix. Defaults to emmo.
+ visited: Ontologies to skip. Only intended for internal use.
+ """
+ if visited is None:
+ visited = set()
+ if self.base_iri.startswith(iri_base):
+ self.prefix = prefix
+ for onto in self.imported_ontologies:
+ if not onto in visited:
+ visited.add(onto)
+ onto.set_common_prefix(
+ iri_base=iri_base, prefix=prefix, visited=visited
+ )
+
set_default_label_annotations(self)
+
+
+¶Sets the default label annotations.
+ +ontopy/ontology.py
def set_default_label_annotations(self):
+ """Sets the default label annotations."""
+ warnings.warn(
+ "Ontology.set_default_label_annotations() is deprecated. "
+ "Default label annotations are set by Ontology.__init__(). ",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ self.label_annotations = DEFAULT_LABEL_ANNOTATIONS[:]
+
set_version(self, version=None, version_iri=None)
+
+
+¶Assign version to ontology by asigning owl:versionIRI.
+If version
but not version_iri
is provided, the version
+IRI will be the combination of base_iri
and version
.
ontopy/ontology.py
def set_version(self, version=None, version_iri=None):
+ """Assign version to ontology by asigning owl:versionIRI.
+
+ If `version` but not `version_iri` is provided, the version
+ IRI will be the combination of `base_iri` and `version`.
+ """
+ _version_iri = "http://www.w3.org/2002/07/owl#versionIRI"
+ version_iri_storid = self.world._abbreviate(_version_iri)
+ if self._has_obj_triple_spo( # pylint: disable=unexpected-keyword-arg
+ # For some reason _has_obj_triples_spo exists in both
+ # owlready2.namespace.Namespace (with arguments subject/predicate)
+ # and in owlready2.triplelite._GraphManager (with arguments s/p)
+ # owlready2.Ontology inherits from Namespace directly
+ # and pylint checks that.
+ # It actually accesses the one in triplelite.
+ # subject=self.storid, predicate=version_iri_storid
+ s=self.storid,
+ p=version_iri_storid,
+ ):
+ self._del_obj_triple_spo(s=self.storid, p=version_iri_storid)
+
+ if not version_iri:
+ if not version:
+ raise TypeError(
+ "Either `version` or `version_iri` must be provided"
+ )
+ head, tail = self.base_iri.rstrip("#/").rsplit("/", 1)
+ version_iri = "/".join([head, version, tail])
+
+ self._add_obj_triple_spo(
+ s=self.storid,
+ p=self.world._abbreviate(_version_iri),
+ o=self.world._abbreviate(version_iri),
+ )
+
sync_attributes(self, name_policy=None, name_prefix='', class_docstring='comment', sync_imported=False)
+
+
+¶This method is intended to be called after you have added new
+classes (typically via Python) to make sure that attributes like
+label
and comments
are defined.
If a class, object property, data property or annotation +property in the current ontology has no label, the name of +the corresponding Python class will be assigned as label.
+If a class, object property, data property or annotation +property has no comment, it will be assigned the docstring of +the corresponding Python class.
+name_policy
specify wether and how the names in the ontology
+should be updated. Valid values are:
+ None not changed
+ "uuid" name_prefix
followed by a global unique id (UUID).
+ If the name is already valid accoridng to this standard
+ it will not be regenerated.
+ "sequential" name_prefix
followed a sequantial number.
+EMMO conventions imply name_policy=='uuid'
.
If sync_imported
is true, all imported ontologies are also
+updated.
The class_docstring
argument specifies the annotation that
+class docstrings are mapped to. Defaults to "comment".
ontopy/ontology.py
def sync_attributes( # pylint: disable=too-many-branches
+ self,
+ name_policy=None,
+ name_prefix="",
+ class_docstring="comment",
+ sync_imported=False,
+):
+ """This method is intended to be called after you have added new
+ classes (typically via Python) to make sure that attributes like
+ `label` and `comments` are defined.
+
+ If a class, object property, data property or annotation
+ property in the current ontology has no label, the name of
+ the corresponding Python class will be assigned as label.
+
+ If a class, object property, data property or annotation
+ property has no comment, it will be assigned the docstring of
+ the corresponding Python class.
+
+ `name_policy` specify wether and how the names in the ontology
+ should be updated. Valid values are:
+ None not changed
+ "uuid" `name_prefix` followed by a global unique id (UUID).
+ If the name is already valid accoridng to this standard
+ it will not be regenerated.
+ "sequential" `name_prefix` followed a sequantial number.
+ EMMO conventions imply ``name_policy=='uuid'``.
+
+ If `sync_imported` is true, all imported ontologies are also
+ updated.
+
+ The `class_docstring` argument specifies the annotation that
+ class docstrings are mapped to. Defaults to "comment".
+ """
+ for cls in itertools.chain(
+ self.classes(),
+ self.object_properties(),
+ self.data_properties(),
+ self.annotation_properties(),
+ ):
+ if not hasattr(cls, "prefLabel"):
+ # no prefLabel - create new annotation property..
+ with self:
+ # pylint: disable=invalid-name,missing-class-docstring
+ # pylint: disable=unused-variable
+ class prefLabel(owlready2.label):
+ pass
+
+ cls.prefLabel = [locstr(cls.__name__, lang="en")]
+ elif not cls.prefLabel:
+ cls.prefLabel.append(locstr(cls.__name__, lang="en"))
+ if class_docstring and hasattr(cls, "__doc__") and cls.__doc__:
+ getattr(cls, class_docstring).append(
+ locstr(inspect.cleandoc(cls.__doc__), lang="en")
+ )
+
+ for ind in self.individuals():
+ if not hasattr(ind, "prefLabel"):
+ # no prefLabel - create new annotation property..
+ with self:
+ # pylint: disable=invalid-name,missing-class-docstring
+ # pylint: disable=function-redefined
+ class prefLabel(owlready2.label):
+ iri = "http://www.w3.org/2004/02/skos/core#prefLabel"
+
+ ind.prefLabel = [locstr(ind.name, lang="en")]
+ elif not ind.prefLabel:
+ ind.prefLabel.append(locstr(ind.name, lang="en"))
+
+ chain = itertools.chain(
+ self.classes(),
+ self.individuals(),
+ self.object_properties(),
+ self.data_properties(),
+ self.annotation_properties(),
+ )
+ if name_policy == "uuid":
+ for obj in chain:
+ try:
+ # Passing the following means that the name is valid
+ # and need not be regenerated.
+ if not obj.name.startswith(name_prefix):
+ raise ValueError
+ uuid.UUID(obj.name.lstrip(name_prefix), version=5)
+ except ValueError:
+ obj.name = name_prefix + str(
+ uuid.uuid5(uuid.NAMESPACE_DNS, obj.name)
+ )
+ elif name_policy == "sequential":
+ for obj in chain:
+ counter = 0
+ while f"{self.base_iri}{name_prefix}{counter}" in self:
+ counter += 1
+ obj.name = f"{name_prefix}{counter}"
+ elif name_policy is not None:
+ raise TypeError(f"invalid name_policy: {name_policy!r}")
+
+ if sync_imported:
+ for onto in self.imported_ontologies:
+ onto.sync_attributes()
+
sync_python_names(self, annotations=('prefLabel', 'label', 'altLabel'))
+
+
+¶Update the python_name
attribute of all properties.
The python_name attribute will be set to the first non-empty
+annotation in the sequence of annotations in annotations
for
+the property.
ontopy/ontology.py
def sync_python_names(self, annotations=("prefLabel", "label", "altLabel")):
+ """Update the `python_name` attribute of all properties.
+
+ The python_name attribute will be set to the first non-empty
+ annotation in the sequence of annotations in `annotations` for
+ the property.
+ """
+
+ def update(gen):
+ for prop in gen:
+ for annotation in annotations:
+ if hasattr(prop, annotation) and getattr(prop, annotation):
+ prop.python_name = getattr(prop, annotation).first()
+ break
+
+ update(
+ self.get_entities(
+ classes=False,
+ individuals=False,
+ object_properties=False,
+ data_properties=False,
+ )
+ )
+ update(
+ self.get_entities(
+ classes=False, individuals=False, annotation_properties=False
+ )
+ )
+
sync_reasoner(self, reasoner='HermiT', include_imported=False, **kwargs)
+
+
+¶Update current ontology by running the given reasoner.
+Supported values for reasoner
are 'HermiT' (default), Pellet
+and 'FaCT++'.
If include_imported
is true, the reasoner will also reason
+over imported ontologies. Note that this may be very slow.
Keyword arguments are passed to the underlying owlready2 function.
+ +ontopy/ontology.py
def sync_reasoner(
+ self, reasoner="HermiT", include_imported=False, **kwargs
+):
+ """Update current ontology by running the given reasoner.
+
+ Supported values for `reasoner` are 'HermiT' (default), Pellet
+ and 'FaCT++'.
+
+ If `include_imported` is true, the reasoner will also reason
+ over imported ontologies. Note that this may be **very** slow.
+
+ Keyword arguments are passed to the underlying owlready2 function.
+ """
+ if reasoner == "FaCT++":
+ sync = sync_reasoner_factpp
+ elif reasoner == "Pellet":
+ sync = owlready2.sync_reasoner_pellet
+ elif reasoner == "HermiT":
+ sync = owlready2.sync_reasoner_hermit
+ else:
+ raise ValueError(
+ f"Unknown reasoner '{reasoner}'. Supported reasoners "
+ "are 'Pellet', 'HermiT' and 'FaCT++'."
+ )
+
+ # For some reason we must visit all entities once before running
+ # the reasoner...
+ list(self.get_entities())
+
+ with self:
+ if include_imported:
+ sync(self.world, **kwargs)
+ else:
+ sync(self, **kwargs)
+
+World (World)
+
+
+
+
+¶A subclass of owlready2.World.
+ +ontopy/ontology.py
class World(owlready2.World):
+ """A subclass of owlready2.World."""
+
+ def __init__(self, *args, **kwargs):
+ # Caches stored in the world
+ self._cached_catalogs = {} # maps url to (mtime, iris, dirs)
+ self._iri_mappings = {} # all iri mappings loaded so far
+ super().__init__(*args, **kwargs)
+
+ def get_ontology(
+ self,
+ base_iri: str = "emmo-inferred",
+ OntologyClass: "owlready2.Ontology" = None,
+ label_annotations: "Sequence" = None,
+ ) -> "Ontology":
+ # pylint: disable=too-many-branches
+ """Returns a new Ontology from `base_iri`.
+
+ Arguments:
+ base_iri: The base IRI of the ontology. May be one of:
+ - valid URL (possibly excluding final .owl or .ttl)
+ - file name (possibly excluding final .owl or .ttl)
+ - "emmo": load latest version of asserted EMMO
+ - "emmo-inferred": load latest version of inferred EMMO
+ (default)
+ - "emmo-development": load latest inferred development
+ version of EMMO. Until first stable release
+ emmo-inferred and emmo-development will be the same.
+ OntologyClass: If given and `base_iri` doesn't correspond
+ to an existing ontology, a new ontology is created of
+ this Ontology subclass. Defaults to `ontopy.Ontology`.
+ label_annotations: Sequence of label IRIs used for accessing
+ entities in the ontology given that they are in the ontology.
+ Label IRIs not in the ontology will need to be added to
+ ontologies in order to be accessible.
+ Defaults to DEFAULT_LABEL_ANNOTATIONS if set to None.
+ """
+ base_iri = base_iri.as_uri() if isinstance(base_iri, Path) else base_iri
+
+ if base_iri == "emmo":
+ base_iri = (
+ "http://emmo-repo.github.io/versions/1.0.0-beta4/emmo.ttl"
+ )
+ elif base_iri == "emmo-inferred":
+ base_iri = (
+ "https://emmo-repo.github.io/versions/1.0.0-beta4/"
+ "emmo-inferred.ttl"
+ )
+ elif base_iri == "emmo-development":
+ base_iri = (
+ "https://emmo-repo.github.io/versions/1.0.0-beta5/"
+ "emmo-inferred.ttl"
+ )
+
+ if base_iri in self.ontologies:
+ onto = self.ontologies[base_iri]
+ elif base_iri + "#" in self.ontologies:
+ onto = self.ontologies[base_iri + "#"]
+ elif base_iri + "/" in self.ontologies:
+ onto = self.ontologies[base_iri + "/"]
+ else:
+ if os.path.exists(base_iri):
+ iri = os.path.abspath(base_iri)
+ elif os.path.exists(base_iri + ".ttl"):
+ iri = os.path.abspath(base_iri + ".ttl")
+ elif os.path.exists(base_iri + ".owl"):
+ iri = os.path.abspath(base_iri + ".owl")
+ else:
+ iri = base_iri
+
+ if iri[-1] not in "/#":
+ iri += "#"
+
+ if OntologyClass is None:
+ OntologyClass = Ontology
+
+ onto = OntologyClass(self, iri)
+
+ if label_annotations:
+ onto.label_annotations = list(label_annotations)
+
+ return onto
+
+ def get_unabbreviated_triples(
+ self, subject=None, predicate=None, obj=None, blank=None
+ ):
+ # pylint: disable=invalid-name
+ """Returns all triples unabbreviated.
+
+ If any of the `subject`, `predicate` or `obj` arguments are given,
+ only matching triples will be returned.
+
+ If `blank` is given, it will be used to represent blank nodes.
+ """
+ return _get_unabbreviated_triples(
+ self, subject=subject, predicate=predicate, obj=obj, blank=blank
+ )
+
get_ontology(self, base_iri='emmo-inferred', OntologyClass=None, label_annotations=None)
+
+
+¶Returns a new Ontology from base_iri
.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
base_iri |
+ str |
+ The base IRI of the ontology. May be one of: +- valid URL (possibly excluding final .owl or .ttl) +- file name (possibly excluding final .owl or .ttl) +- "emmo": load latest version of asserted EMMO +- "emmo-inferred": load latest version of inferred EMMO + (default) +- "emmo-development": load latest inferred development + version of EMMO. Until first stable release + emmo-inferred and emmo-development will be the same. |
+ 'emmo-inferred' |
+
OntologyClass |
+ owlready2.Ontology |
+ If given and |
+ None |
+
label_annotations |
+ Sequence |
+ Sequence of label IRIs used for accessing +entities in the ontology given that they are in the ontology. +Label IRIs not in the ontology will need to be added to +ontologies in order to be accessible. +Defaults to DEFAULT_LABEL_ANNOTATIONS if set to None. |
+ None |
+
ontopy/ontology.py
def get_ontology(
+ self,
+ base_iri: str = "emmo-inferred",
+ OntologyClass: "owlready2.Ontology" = None,
+ label_annotations: "Sequence" = None,
+) -> "Ontology":
+ # pylint: disable=too-many-branches
+ """Returns a new Ontology from `base_iri`.
+
+ Arguments:
+ base_iri: The base IRI of the ontology. May be one of:
+ - valid URL (possibly excluding final .owl or .ttl)
+ - file name (possibly excluding final .owl or .ttl)
+ - "emmo": load latest version of asserted EMMO
+ - "emmo-inferred": load latest version of inferred EMMO
+ (default)
+ - "emmo-development": load latest inferred development
+ version of EMMO. Until first stable release
+ emmo-inferred and emmo-development will be the same.
+ OntologyClass: If given and `base_iri` doesn't correspond
+ to an existing ontology, a new ontology is created of
+ this Ontology subclass. Defaults to `ontopy.Ontology`.
+ label_annotations: Sequence of label IRIs used for accessing
+ entities in the ontology given that they are in the ontology.
+ Label IRIs not in the ontology will need to be added to
+ ontologies in order to be accessible.
+ Defaults to DEFAULT_LABEL_ANNOTATIONS if set to None.
+ """
+ base_iri = base_iri.as_uri() if isinstance(base_iri, Path) else base_iri
+
+ if base_iri == "emmo":
+ base_iri = (
+ "http://emmo-repo.github.io/versions/1.0.0-beta4/emmo.ttl"
+ )
+ elif base_iri == "emmo-inferred":
+ base_iri = (
+ "https://emmo-repo.github.io/versions/1.0.0-beta4/"
+ "emmo-inferred.ttl"
+ )
+ elif base_iri == "emmo-development":
+ base_iri = (
+ "https://emmo-repo.github.io/versions/1.0.0-beta5/"
+ "emmo-inferred.ttl"
+ )
+
+ if base_iri in self.ontologies:
+ onto = self.ontologies[base_iri]
+ elif base_iri + "#" in self.ontologies:
+ onto = self.ontologies[base_iri + "#"]
+ elif base_iri + "/" in self.ontologies:
+ onto = self.ontologies[base_iri + "/"]
+ else:
+ if os.path.exists(base_iri):
+ iri = os.path.abspath(base_iri)
+ elif os.path.exists(base_iri + ".ttl"):
+ iri = os.path.abspath(base_iri + ".ttl")
+ elif os.path.exists(base_iri + ".owl"):
+ iri = os.path.abspath(base_iri + ".owl")
+ else:
+ iri = base_iri
+
+ if iri[-1] not in "/#":
+ iri += "#"
+
+ if OntologyClass is None:
+ OntologyClass = Ontology
+
+ onto = OntologyClass(self, iri)
+
+ if label_annotations:
+ onto.label_annotations = list(label_annotations)
+
+ return onto
+
get_unabbreviated_triples(self, subject=None, predicate=None, obj=None, blank=None)
+
+
+¶Returns all triples unabbreviated.
+If any of the subject
, predicate
or obj
arguments are given,
+only matching triples will be returned.
If blank
is given, it will be used to represent blank nodes.
ontopy/ontology.py
def get_unabbreviated_triples(
+ self, subject=None, predicate=None, obj=None, blank=None
+):
+ # pylint: disable=invalid-name
+ """Returns all triples unabbreviated.
+
+ If any of the `subject`, `predicate` or `obj` arguments are given,
+ only matching triples will be returned.
+
+ If `blank` is given, it will be used to represent blank nodes.
+ """
+ return _get_unabbreviated_triples(
+ self, subject=subject, predicate=predicate, obj=obj, blank=blank
+ )
+
flatten(items)
+
+
+¶Yield items from any nested iterable.
+ +ontopy/ontology.py
def flatten(items):
+ """Yield items from any nested iterable."""
+ for item in items:
+ if isinstance(item, Iterable) and not isinstance(item, (str, bytes)):
+ for sub_item in flatten(item):
+ yield sub_item
+ else:
+ yield item
+
get_ontology(*args, **kwargs)
+
+
+¶Returns a new Ontology from base_iri
.
This is a convenient function for calling World.get_ontology().
+ +ontopy/ontology.py
def get_ontology(*args, **kwargs):
+ """Returns a new Ontology from `base_iri`.
+
+ This is a convenient function for calling World.get_ontology()."""
+ return World().get_ontology(*args, **kwargs)
+
This module injects some additional methods into owlready2 classes.
+ + + +disjoint_with(self, reduce=False)
+
+
+¶Returns a generator with all classes that are disjoint with self
.
If reduce
is True
, all classes that are a descendant of another class
+will be excluded.
ontopy/patch.py
def disjoint_with(self, reduce=False):
+ """Returns a generator with all classes that are disjoint with `self`.
+
+ If `reduce` is `True`, all classes that are a descendant of another class
+ will be excluded.
+ """
+ if reduce:
+ disjoint_set = set(self.disjoint_with())
+ for entity in disjoint_set.copy():
+ disjoint_set.difference_update(
+ entity.descendants(include_self=False)
+ )
+ for entity in disjoint_set:
+ yield entity
+ else:
+ for disjoint in self.disjoints():
+ for entity in disjoint.entities:
+ if entity is not self:
+ yield entity
+
get_annotations(self, all=False, imported=True)
+
+
+¶Returns a dict with non-empty annotations.
+If all
is True
, also annotations with no value are included.
If imported
is True
, also include annotations defined in imported
+ontologies.
ontopy/patch.py
def get_annotations(
+ self, all=False, imported=True
+): # pylint: disable=redefined-builtin
+ """Returns a dict with non-empty annotations.
+
+ If `all` is `True`, also annotations with no value are included.
+
+ If `imported` is `True`, also include annotations defined in imported
+ ontologies.
+ """
+ onto = self.namespace.ontology
+
+ annotations = {
+ str(get_preferred_label(_)): _._get_values_for_class(self)
+ for _ in onto.annotation_properties(imported=imported)
+ }
+ if all:
+ return annotations
+ return {key: value for key, value in annotations.items() if value}
+
get_indirect_is_a(self, skip_classes=True)
+
+
+¶Returns the set of all isSubclassOf relations of self and its ancestors.
+If skip_classes
is True
, indirect classes are not included in the
+returned set.
ontopy/patch.py
def get_indirect_is_a(self, skip_classes=True):
+ """Returns the set of all isSubclassOf relations of self and its ancestors.
+
+ If `skip_classes` is `True`, indirect classes are not included in the
+ returned set.
+ """
+ subclass_relations = set()
+ for entity in reversed(self.mro()):
+ for attr in "is_a", "equivalent_to":
+ if hasattr(entity, attr):
+ lst = getattr(entity, attr)
+ if skip_classes:
+ subclass_relations.update(
+ r
+ for r in lst
+ if not isinstance(r, owlready2.ThingClass)
+ )
+ else:
+ subclass_relations.update(lst)
+
+ subclass_relations.update(self.is_a)
+ return subclass_relations
+
get_parents(self, strict=False)
+
+
+¶Returns a list of all parents.
+If strict
is True
, parents that are parents of other parents are
+excluded.
ontopy/patch.py
def get_parents(self, strict=False):
+ """Returns a list of all parents.
+
+ If `strict` is `True`, parents that are parents of other parents are
+ excluded.
+ """
+ if strict:
+ parents = self.get_parents()
+ for entity in parents.copy():
+ parents.difference_update(entity.ancestors(include_self=False))
+ return parents
+ if isinstance(self, ThingClass):
+ return {cls for cls in self.is_a if isinstance(cls, ThingClass)}
+ if isinstance(self, owlready2.ObjectPropertyClass):
+ return {
+ cls
+ for cls in self.is_a
+ if isinstance(cls, owlready2.ObjectPropertyClass)
+ }
+ raise EMMOntoPyException(
+ "self has no parents - this should not be possible!"
+ )
+
get_preferred_label(self)
+
+
+¶Returns the preferred label as a string (not list).
+The following heuristic is used: + - if prefLabel annotation property exists, returns the first prefLabel + - if label annotation property exists, returns the first label + - otherwise return the name
+ +ontopy/patch.py
def get_preferred_label(self):
+ """Returns the preferred label as a string (not list).
+
+ The following heuristic is used:
+ - if prefLabel annotation property exists, returns the first prefLabel
+ - if label annotation property exists, returns the first label
+ - otherwise return the name
+ """
+ if hasattr(self, "prefLabel") and self.prefLabel:
+ return self.prefLabel[0]
+ if hasattr(self, "label") and self.label:
+ return self.label.first()
+ return self.name
+
get_typename(self)
+
+
+¶Get restriction type label/name.
+ +ontopy/patch.py
def get_typename(self):
+ """Get restriction type label/name."""
+ return owlready2.class_construct._restriction_type_2_label[self.type]
+
has(self, name)
+
+
+¶Returns true if name is in self.keys().
ontopy/patch.py
def has(self, name):
+ """Returns true if `name`"""
+ return name in set(self.keys())
+
items(self)
+
+
+¶Return a generator over annotation property (name, value_list) +pairs associated with this ontology.
+ +ontopy/patch.py
def items(self):
+ """Return a generator over annotation property (name, value_list)
+ pairs associated with this ontology."""
+ namespace = self.namespace
+ for annotation in namespace.annotation_properties():
+ if namespace._has_data_triple_spod(
+ s=namespace.storid, p=annotation.storid
+ ):
+ yield annotation, getattr(self, annotation.name)
+
keys(self)
+
+
+¶Return a generator over annotation property names associated +with this ontology.
+ +ontopy/patch.py
def keys(self):
+ """Return a generator over annotation property names associated
+ with this ontology."""
+ namespace = self.namespace
+ for annotation in namespace.annotation_properties():
+ if namespace._has_data_triple_spod(
+ s=namespace.storid, p=annotation.storid
+ ):
+ yield annotation
+
namespace_init(self, world_or_ontology, base_iri, name=None)
+
+
+¶init function for the Namespace
class.
ontopy/patch.py
def namespace_init(self, world_or_ontology, base_iri, name=None):
+ """__init__ function for the `Namespace` class."""
+ orig_namespace_init(self, world_or_ontology, base_iri, name)
+ if self.name.endswith(".ttl"):
+ self.name = self.name[:-4]
+
render_func(entity)
+
+
+¶Improve default rendering of entities.
+ +ontopy/patch.py
def render_func(entity):
+ """Improve default rendering of entities."""
+ if hasattr(entity, "prefLabel") and entity.prefLabel:
+ name = entity.prefLabel[0]
+ elif hasattr(entity, "label") and entity.label:
+ name = entity.label[0]
+ elif hasattr(entity, "altLabel") and entity.altLabel:
+ name = entity.altLabel[0]
+ else:
+ name = entity.name
+ return f"{entity.namespace.name}.{name}"
+
Some generic utility functions.
+ + + +
+AmbiguousLabelError (LookupError, AttributeError, EMMOntoPyException)
+
+
+
+
+¶Error raised when a label is ambiguous.
+ +ontopy/utils.py
class AmbiguousLabelError(LookupError, AttributeError, EMMOntoPyException):
+ """Error raised when a label is ambiguous."""
+
+EMMOntoPyException (Exception)
+
+
+
+
+¶A BaseException class for EMMOntoPy
+ +ontopy/utils.py
class EMMOntoPyException(Exception):
+ """A BaseException class for EMMOntoPy"""
+
+EMMOntoPyWarning (Warning)
+
+
+
+
+¶A BaseWarning class for EMMOntoPy
+ +ontopy/utils.py
class EMMOntoPyWarning(Warning):
+ """A BaseWarning class for EMMOntoPy"""
+
+EntityClassDefinitionError (EMMOntoPyException)
+
+
+
+
+¶Error in ThingClass definition.
+ +ontopy/utils.py
class EntityClassDefinitionError(EMMOntoPyException):
+ """Error in ThingClass definition."""
+
+IncompatibleVersion (EMMOntoPyWarning)
+
+
+
+
+¶An installed dependency version may be incompatible with a functionality +of this package - or rather an outcome of a functionality. +This is not critical, hence this is only a warning.
+ +ontopy/utils.py
class IncompatibleVersion(EMMOntoPyWarning):
+ """An installed dependency version may be incompatible with a functionality
+ of this package - or rather an outcome of a functionality.
+ This is not critical, hence this is only a warning."""
+
+IndividualWarning (EMMOntoPyWarning)
+
+
+
+
+¶A warning related to an individual, e.g. punning.
+ +ontopy/utils.py
class IndividualWarning(EMMOntoPyWarning):
+ """A warning related to an individual, e.g. punning."""
+
+LabelDefinitionError (EMMOntoPyException)
+
+
+
+
+¶Error in label definition.
+ +ontopy/utils.py
class LabelDefinitionError(EMMOntoPyException):
+ """Error in label definition."""
+
+NoSuchLabelError (LookupError, AttributeError, EMMOntoPyException)
+
+
+
+
+¶Error raised when a label cannot be found.
+ +ontopy/utils.py
class NoSuchLabelError(LookupError, AttributeError, EMMOntoPyException):
+ """Error raised when a label cannot be found."""
+
+ReadCatalogError (OSError)
+
+
+
+
+¶Error reading catalog file.
+ +ontopy/utils.py
class ReadCatalogError(IOError):
+ """Error reading catalog file."""
+
+UnknownVersion (EMMOntoPyException)
+
+
+
+
+¶Cannot retrieve version from a package.
+ +ontopy/utils.py
class UnknownVersion(EMMOntoPyException):
+ """Cannot retrieve version from a package."""
+
annotate_source(onto, imported=True)
+
+
+¶Annotate all entities with the base IRI of the ontology using
+rdfs:isDefinedBy
annotations.
If imported
is true, all entities in imported sub-ontologies will
+also be annotated.
This is contextual information that is otherwise lost when the ontology +is squashed and/or inferred.
+ +ontopy/utils.py
def annotate_source(onto, imported=True):
+ """Annotate all entities with the base IRI of the ontology using
+ `rdfs:isDefinedBy` annotations.
+
+ If `imported` is true, all entities in imported sub-ontologies will
+ also be annotated.
+
+ This is contextual information that is otherwise lost when the ontology
+ is squashed and/or inferred.
+ """
+ source = onto._abbreviate(
+ "http://www.w3.org/2000/01/rdf-schema#isDefinedBy"
+ )
+ for entity in onto.get_entities(imported=imported):
+ triple = (
+ entity.storid,
+ source,
+ onto._abbreviate(entity.namespace.ontology.base_iri),
+ )
+ if not onto._has_obj_triple_spo(*triple):
+ onto._add_obj_triple_spo(*triple)
+
asstring(expr, link='{label}', recursion_depth=0, exclude_object=False, ontology=None)
+
+
+¶Returns a string representation of expr
.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
expr |
+ + | The entity, restriction or a logical expression or these +to represent. |
+ required | +
link |
+ + | A template for links. May contain the following variables: +- {iri}: The full IRI of the concept. +- {name}: Name-part of IRI. +- {ref}: "#{name}" if the base iri of the ontology has the same + root as {iri}, otherwise "{iri}". +- {label}: The label of the concept. +- {lowerlabel}: The label of the concept in lower case and with + spaces replaced with hyphens. |
+ '{label}' |
+
recursion_depth |
+ + | Recursion depth. Only intended for internal use. |
+ 0 |
+
exclude_object |
+ + | If true, the object will be excluded in restrictions. |
+ False |
+
ontology |
+ + | Ontology object. |
+ None |
+
Returns:
+Type | +Description | +
---|---|
str |
+ String representation of |
+
ontopy/utils.py
def asstring( # pylint: disable=too-many-return-statements,too-many-branches,too-many-statements
+ expr,
+ link="{label}",
+ recursion_depth=0,
+ exclude_object=False,
+ ontology=None,
+) -> str:
+ """Returns a string representation of `expr`.
+
+ Arguments:
+ expr: The entity, restriction or a logical expression or these
+ to represent.
+ link: A template for links. May contain the following variables:
+ - {iri}: The full IRI of the concept.
+ - {name}: Name-part of IRI.
+ - {ref}: "#{name}" if the base iri of hte ontology has the same
+ root as {iri}, otherwise "{iri}".
+ - {label}: The label of the concept.
+ - {lowerlabel}: The label of the concept in lower case and with
+ spaces replaced with hyphens.
+ recursion_depth: Recursion depth. Only intended for internal use.
+ exclude_object: If true, the object will be excluded in restrictions.
+ ontology: Ontology object.
+
+ Returns:
+ String representation of `expr`.
+ """
+ if ontology is None:
+ ontology = expr.ontology
+
+ def fmt(entity):
+ """Returns the formatted label of an entity."""
+ if isinstance(entity, str):
+ if ontology and ontology.world[entity]:
+ iri = ontology.world[entity].iri
+ elif (
+ ontology
+ and re.match("^[a-zA-Z0-9_+-]+$", entity)
+ and entity in ontology
+ ):
+ iri = ontology[entity].iri
+ else:
+ # This may not be a valid IRI, but the best we can do
+ iri = entity
+ label = entity
+ else:
+ iri = entity.iri
+ label = get_label(entity)
+ name = getiriname(iri)
+ start = iri.split("#", 1)[0] if "#" in iri else iri.rsplit("/", 1)[0]
+ ref = f"#{name}" if ontology.base_iri.startswith(start) else iri
+ return link.format(
+ entity=entity,
+ name=name,
+ ref=ref,
+ iri=iri,
+ label=label,
+ lowerlabel=label.lower().replace(" ", "-"),
+ )
+
+ if isinstance(expr, str):
+ # return link.format(name=expr)
+ return fmt(expr)
+ if isinstance(expr, owlready2.Restriction):
+ rlabel = owlready2.class_construct._restriction_type_2_label[expr.type]
+
+ if isinstance(
+ expr.property,
+ (owlready2.ObjectPropertyClass, owlready2.DataPropertyClass),
+ ):
+ res = fmt(expr.property)
+ elif isinstance(expr.property, owlready2.Inverse):
+ string = asstring(
+ expr.property.property,
+ link,
+ recursion_depth + 1,
+ ontology=ontology,
+ )
+ res = f"Inverse({string})"
+ else:
+ print(
+ f"*** WARNING: unknown restriction property: {expr.property!r}"
+ )
+ res = fmt(expr.property)
+
+ if not rlabel:
+ pass
+ elif expr.type in (owlready2.MIN, owlready2.MAX, owlready2.EXACTLY):
+ res += f" {rlabel} {expr.cardinality}"
+ elif expr.type in (
+ owlready2.SOME,
+ owlready2.ONLY,
+ owlready2.VALUE,
+ owlready2.HAS_SELF,
+ ):
+ res += f" {rlabel}"
+ else:
+ print("*** WARNING: unknown relation", expr, rlabel)
+ res += f" {rlabel}"
+
+ if not exclude_object:
+ string = asstring(
+ expr.value, link, recursion_depth + 1, ontology=ontology
+ )
+ res += (
+ f" {string!r}" if isinstance(expr.value, str) else f" {string}"
+ )
+ return res
+ if isinstance(expr, owlready2.Or):
+ res = " or ".join(
+ [
+ asstring(c, link, recursion_depth + 1, ontology=ontology)
+ for c in expr.Classes
+ ]
+ )
+ return res if recursion_depth == 0 else f"({res})"
+ if isinstance(expr, owlready2.And):
+ res = " and ".join(
+ [
+ asstring(c, link, recursion_depth + 1, ontology=ontology)
+ for c in expr.Classes
+ ]
+ )
+ return res if recursion_depth == 0 else f"({res})"
+ if isinstance(expr, owlready2.Not):
+ string = asstring(
+ expr.Class, link, recursion_depth + 1, ontology=ontology
+ )
+ return f"not {string}"
+ if isinstance(expr, owlready2.ThingClass):
+ return fmt(expr)
+ if isinstance(expr, owlready2.PropertyClass):
+ return fmt(expr)
+ if isinstance(expr, owlready2.Thing): # instance (individual)
+ return fmt(expr)
+ if isinstance(expr, owlready2.class_construct.Inverse):
+ return f"inverse({fmt(expr.property)})"
+ if isinstance(expr, owlready2.disjoint.AllDisjoint):
+ return fmt(expr)
+
+ if isinstance(expr, (bool, int, float)):
+ return repr(expr)
+ # Check for subclasses
+ if inspect.isclass(expr):
+ if issubclass(expr, (bool, int, float, str)):
+ return fmt(expr.__class__.__name__)
+ if issubclass(expr, datetime.date):
+ return "date"
+ if issubclass(expr, datetime.time):
+ return "datetime"
+ if issubclass(expr, datetime.datetime):
+ return "datetime"
+
+ raise RuntimeError(f"Unknown expression: {expr!r} (type: {type(expr)!r})")
+
camelsplit(string)
+
+
+¶Splits CamelCase string before upper case letters (except +if there is a sequence of upper case letters).
+ +ontopy/utils.py
def camelsplit(string):
+ """Splits CamelCase string before upper case letters (except
+ if there is a sequence of upper case letters)."""
+ if len(string) < 2:
+ return string
+ result = []
+ prev_lower = False
+ prev_isspace = True
+ char = string[0]
+ for next_char in string[1:]:
+ if (not prev_isspace and char.isupper() and next_char.islower()) or (
+ prev_lower and char.isupper()
+ ):
+ result.append(" ")
+ result.append(char)
+ prev_lower = char.islower()
+ prev_isspace = char.isspace()
+ char = next_char
+ result.append(char)
+ return "".join(result)
+
convert_imported(input_ontology, output_ontology, input_format=None, output_format='xml', url_from_catalog=None, catalog_file='catalog-v001.xml')
+
+
+¶Convert imported ontologies.
Store the output in a directory structure matching the source +files. This requires catalog file(s) to be present.
+Warning
+To convert to Turtle (.ttl
) format, you must have installed
+rdflib>=6.0.0
. See Known issues for
+more information.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
input_ontology |
+ Union[Path, str] |
+ input ontology file name |
+ required | +
output_ontology |
+ Union[Path, str] |
+ output ontology file path. The directory part of
+ |
+ required | +
input_format |
+ Optional[str] |
+ input format. The default is to infer from
+ |
+ None |
+
output_format |
+ str |
+ output format. The default is to infer from
+ |
+ 'xml' |
+
url_from_catalog |
+ Optional[bool] |
+ Whether to read urls form catalog file. +If False, the catalog file will be used if it exists. |
+ None |
+
catalog_file |
+ str |
+ name of catalog file, that maps ontology IRIs to +local file names |
+ 'catalog-v001.xml' |
+
ontopy/utils.py
def convert_imported( # pylint: disable=too-many-arguments,too-many-locals
+ input_ontology: "Union[Path, str]",
+ output_ontology: "Union[Path, str]",
+ input_format: "Optional[str]" = None,
+ output_format: str = "xml",
+ url_from_catalog: "Optional[bool]" = None,
+ catalog_file: str = "catalog-v001.xml",
+):
+ """Convert imported ontologies.
+
+ Store the output in a directory structure matching the source
+ files. This requires catalog file(s) to be present.
+
+ Warning:
+ To convert to Turtle (`.ttl`) format, you must have installed
+ `rdflib>=6.0.0`. See [Known issues](../../../#known-issues) for
+ more information.
+
+ Args:
+ input_ontology: input ontology file name
+ output_ontology: output ontology file path. The directory part of
+ `output` will be the root of the generated directory structure
+ input_format: input format. The default is to infer from
+ `input_ontology`
+ output_format: output format. The default is to infer from
+ `output_ontology`
+ url_from_catalog: Whether to read urls from catalog file.
+ If False, the catalog file will be used if it exists.
+ catalog_file: name of catalog file, that maps ontology IRIs to
+ local file names
+ """
+ inroot = os.path.dirname(os.path.abspath(input_ontology))
+ outroot = os.path.dirname(os.path.abspath(output_ontology))
+ outext = os.path.splitext(output_ontology)[1]
+
+ if url_from_catalog is None:
+ url_from_catalog = os.path.exists(os.path.join(inroot, catalog_file))
+
+ if url_from_catalog:
+ iris, dirs = read_catalog(
+ inroot, catalog_file=catalog_file, recursive=True, return_paths=True
+ )
+
+ # Create output dirs and copy catalog files
+ for indir in dirs:
+ outdir = os.path.normpath(
+ os.path.join(outroot, os.path.relpath(indir, inroot))
+ )
+ if not os.path.exists(outdir):
+ os.makedirs(outdir)
+ with open(
+ os.path.join(indir, catalog_file), mode="rt", encoding="utf8"
+ ) as handle:
+ content = handle.read()
+ for path in iris.values():
+ newpath = os.path.splitext(path)[0] + outext
+ content = content.replace(
+ os.path.basename(path), os.path.basename(newpath)
+ )
+ with open(
+ os.path.join(outdir, catalog_file), mode="wt", encoding="utf8"
+ ) as handle:
+ handle.write(content)
+ else:
+ iris = {}
+
+ outpaths = set()
+
+ def recur(graph, outext):
+ for imported in graph.objects(
+ predicate=URIRef("http://www.w3.org/2002/07/owl#imports")
+ ):
+ inpath = iris.get(str(imported), str(imported))
+ if inpath.startswith(("http://", "https://", "ftp://")):
+ outpath = os.path.join(outroot, inpath.split("/")[-1])
+ else:
+ outpath = os.path.join(outroot, os.path.relpath(inpath, inroot))
+ outpath = os.path.splitext(os.path.normpath(outpath))[0] + outext
+ if outpath not in outpaths:
+ outpaths.add(outpath)
+ fmt = (
+ input_format
+ if input_format
+ else guess_format(inpath, fmap=FMAP)
+ )
+ new_graph = Graph()
+ new_graph.parse(iris.get(inpath, inpath), format=fmt)
+ new_graph.serialize(destination=outpath, format=output_format)
+ recur(new_graph, outext)
+
+ # Write output files
+ fmt = (
+ input_format
+ if input_format
+ else guess_format(input_ontology, fmap=FMAP)
+ )
+
+ if not _validate_installed_version(
+ package="rdflib", min_version="6.0.0"
+ ) and (output_format == FMAP.get("ttl", "") or outext == "ttl"):
+ from rdflib import ( # pylint: disable=import-outside-toplevel
+ __version__ as __rdflib_version__,
+ )
+
+ warnings.warn(
+ IncompatibleVersion(
+ "To correctly convert to Turtle format, rdflib must be "
+ "version 6.0.0 or greater, however, the detected rdflib "
+ "version used by your Python interpreter is "
+ f"{__rdflib_version__!r}. For more information see the "
+ "'Known issues' section of the README."
+ )
+ )
+
+ graph = Graph()
+ try:
+ graph.parse(input_ontology, format=fmt)
+ except PluginException as exc: # Add input_ontology to exception msg
+ raise PluginException(
+ f'Cannot load "{input_ontology}": {exc.msg}'
+ ).with_traceback(exc.__traceback__)
+ graph.serialize(destination=output_ontology, format=output_format)
+ recur(graph, outext)
+
directory_layout(onto)
+
+
¶Analyse IRIs of imported ontologies and suggest a directory +layout for saving recursively.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
onto |
+ + | Ontology to analyse. |
+ required | +
Returns:
+Type | +Description | +
---|---|
layout |
+ A dict mapping ontology objects to relative path names + derived from the ontology IRIs. No file name extension are + added. |
+
Examples:
+Assume that our ontology onto
has IRI ex:onto
. If it directly
+or indirectly imports ontologies with IRIs ex:A/ontoA
, ex:B/ontoB
+and ex:A/C/ontoC
, this function will return the following dict:
{
+ onto: "onto",
+ ontoA: "A/ontoA",
+ ontoB: "B/ontoB",
+ ontoC: "A/C/ontoC",
+}
+
where ontoA
, ontoB
and ontoC
are imported Ontology objects.
ontopy/utils.py
def directory_layout(onto):
    """Analyse the IRIs of an ontology and its imports and suggest a
    relative directory layout for saving them recursively.

    Arguments:
        onto: Ontology to analyse.

    Returns:
        layout: Dict mapping each ontology object (`onto` itself plus
            everything it imports, directly or indirectly) to a relative
            path name derived from its IRI.  No file name extensions are
            added.

    Example:
        Assume that our ontology `onto` has IRI `ex:onto`. If it directly
        or indirectly imports ontologies with IRIs `ex:A/ontoA`,
        `ex:B/ontoB` and `ex:A/C/ontoC`, this function will return the
        following dict:

            {
                onto: "onto",
                ontoA: "A/ontoA",
                ontoB: "B/ontoB",
                ontoC: "A/C/ontoC",
            }

        where `ontoA`, `ontoB` and `ontoC` are imported Ontology objects.
    """
    layout = {}

    def visit(ontology):
        # Depth-first: assign paths to imports before the importer itself.
        for imported in ontology.imported_ontologies:
            if imported not in layout:
                visit(imported)
        base = ontology.base_iri.rstrip("/#")

        # Heuristics to reproduce the EMMO layout: when an import lives
        # "below" this ontology's base IRI (or for the special
        # mereocausality module), the ontology is stored inside a
        # directory named after itself.  It might not apply to all
        # ontologies, so maybe it should be made optional.
        emmo_style = any(
            other.base_iri.startswith(base + "/")
            for other in ontology.imported_ontologies
        )
        if ontology.base_iri == "http://emmo.info/emmo/mereocausality#":
            emmo_style = True

        if emmo_style:
            layout[ontology] = base + "/" + os.path.basename(base)
        else:
            layout[ontology] = base

    visit(onto)

    # Strip the common leading prefix shared by all paths.
    prefix = os.path.commonprefix(list(layout.values()))
    for ontology, path in layout.items():
        layout[ontology] = path[len(prefix):].lstrip("/")

    return layout
+
english(string)
+
+
+¶Returns string
as an English location string.
ontopy/utils.py
def english(string):
    """Return `string` as an English (lang="en") localised string."""
    localised = owlready2.locstr(string, lang="en")
    return localised
+
get_format(outfile, default, fmt=None)
+
+
+¶Infer format from outfile and format.
+ +ontopy/utils.py
def get_format(outfile: str, default: str, fmt: str = None):
    """Infer the format to use.

    `fmt` takes precedence when given; otherwise the file extension of
    `outfile` is used, falling back to `default` when there is none.
    Any leading dot is stripped from the returned value.
    """
    chosen = os.path.splitext(outfile)[1] if fmt is None else fmt
    if not chosen:
        chosen = default
    return chosen.lstrip(".")
+
get_label(entity)
+
+
+¶Returns the label of an entity.
+ +ontopy/utils.py
def get_label(entity):
    """Return a human-readable label for `entity`.

    The label is looked up in the following order: a non-empty
    `prefLabel` annotation, a non-empty `label` annotation, the
    `__name__` attribute, a `name` attribute (converted to str), the
    string itself if `entity` is a string, and finally `repr(entity)`.
    """
    _missing = object()  # sentinel: distinguishes "absent" from falsy

    pref_labels = getattr(entity, "prefLabel", None)
    if pref_labels:
        return pref_labels.first()

    labels = getattr(entity, "label", None)
    if labels:
        return labels.first()

    dunder_name = getattr(entity, "__name__", _missing)
    if dunder_name is not _missing:
        return dunder_name

    name_attr = getattr(entity, "name", _missing)
    if name_attr is not _missing:
        return str(name_attr)

    return entity if isinstance(entity, str) else repr(entity)
+
getiriname(iri)
+
+
+¶Return name part of an IRI.
+The name part is what follows after the last slash or hash.
+ +ontopy/utils.py
def getiriname(iri):
    """Return the name part of an IRI.

    The name part is the fragment when one is present, otherwise
    whatever follows the last slash of the path.
    """
    parsed = urllib.parse.urlparse(iri)
    if parsed.fragment:
        return parsed.fragment
    return parsed.path.rsplit("/", 1)[-1]
+
infer_version(iri, version_iri)
+
+
+¶Infer version from IRI and versionIRI.
+ +ontopy/utils.py
def infer_version(iri, version_iri):
    """Infer version from IRI and versionIRI.

    If `version_iri` starts with `iri`, the version is simply the
    remainder of `version_iri`.  Otherwise the version is assumed to be
    inserted somewhere inside `iri`, and is recovered as the characters
    of `version_iri` that do not align with `iri`.

    Raises:
        ValueError: If the inferred version contains a slash, i.e. if
            `version_iri` is not consistent with `iri`.
    """
    if str(version_iri[: len(iri)]) == str(iri):
        # Simple case: versionIRI is the base IRI with the version appended.
        version = version_iri[len(iri) :].lstrip("/")
    else:
        # Walk through `iri`, skipping over the extra characters in
        # `version_iri`; the skipped characters constitute the version.
        # `j` is the accumulated offset of `version_iri` relative to `iri`.
        j = 0
        version_parts = []
        for i, char in enumerate(iri):
            while i + j < len(version_iri) and char != version_iri[i + j]:
                version_parts.append(version_iri[i + j])
                j += 1
        version = "".join(version_parts).lstrip("/").rstrip("/#")

    if "/" in version:
        raise ValueError(
            f"version IRI {version_iri!r} is not consistent with base IRI "
            f"{iri!r}"
        )
    return version
+
isinteractive()
+
+
+¶Returns true if we are running from an interactive interpreter, +false otherwise.
+ +ontopy/utils.py
def isinteractive():
    """Return true if we are running from an interactive interpreter,
    false otherwise.

    An interactive session is detected when IPython is running (IPython
    sets `__IPYTHON__` on the `builtins` module), when Python was
    started with the `-i` flag, or when a primary prompt (`sys.ps1`)
    is defined.
    """
    # Bug fix: the previous check `hasattr(__builtins__, "__IPYTHON__")`
    # is unreliable — inside a module `__builtins__` may be a plain dict,
    # on which hasattr() never finds `__IPYTHON__`.  Checking the
    # `builtins` module works in both cases.
    import builtins  # pylint: disable=import-outside-toplevel

    return bool(
        hasattr(builtins, "__IPYTHON__")
        or sys.flags.interactive
        or hasattr(sys, "ps1")
    )
+
normalise_url(url)
+
+
+¶Returns url
in a normalised form.
ontopy/utils.py
def normalise_url(url):
    """Returns `url` in a normalised form.

    The path component is normalised by collapsing redundant separators
    and up-level references, so that e.g. `a//b`, `a/./b` and `a/x/../b`
    all become `a/b`.
    """
    # Bug fix: use posixpath instead of os.path — URL paths always use
    # forward slashes, while os.path.normpath would insert backslashes
    # on Windows.
    import posixpath  # pylint: disable=import-outside-toplevel

    splitted = urllib.parse.urlsplit(url)
    components = list(splitted)
    components[2] = posixpath.normpath(splitted.path)
    return urllib.parse.urlunsplit(components)
+
read_catalog(uri, catalog_file='catalog-v001.xml', baseuri=None, recursive=False, relative_to=None, return_paths=False, visited_iris=None, visited_paths=None)
+
+
+¶Reads a Protégé catalog file and returns as a dict.
+The returned dict maps the ontology IRI (name) to its actual +location (URI). The location can be either an absolute file path +or a HTTP, HTTPS or FTP web location.
+uri
is a string locating the catalog file. It may be a http or
+https web location or a file path.
The catalog_file
argument specifies the catalog file name and is
+used if path
is used when recursive
is true or when path
is a
+directory.
If baseuri
is not None, it will be used as the base URI for the
+mapped locations. Otherwise it defaults to uri
with its final
+component omitted.
If recursive
is true, catalog files in sub-folders are also read.
if relative_to
is given, the paths in the returned dict will be
+relative to this path.
If return_paths
is true, a set of directory paths to source
+files is returned in addition to the default dict.
The visited_iris
and visited_paths
arguments are only intended for
+internal use to avoid infinite recursions.
A ReadCatalogError is raised if the catalog file cannot be found.
+ +ontopy/utils.py
def read_catalog( # pylint: disable=too-many-locals,too-many-statements,too-many-arguments
    uri,
    catalog_file="catalog-v001.xml",
    baseuri=None,
    recursive=False,
    relative_to=None,
    return_paths=False,
    visited_iris=None,
    visited_paths=None,
):
    """Reads a Protégé catalog file and returns as a dict.

    The returned dict maps the ontology IRI (name) to its actual
    location (URI). The location can be either an absolute file path
    or a HTTP, HTTPS or FTP web location.

    `uri` is a string locating the catalog file. It may be a http or
    https web location or a file path.

    The `catalog_file` argument specifies the catalog file name and is
    used when `recursive` is true or when `uri` is a directory.

    If `baseuri` is not None, it will be used as the base URI for the
    mapped locations. Otherwise it defaults to `uri` with its final
    component omitted.

    If `recursive` is true, catalog files in sub-folders are also read.

    if `relative_to` is given, the paths in the returned dict will be
    relative to this path.

    If `return_paths` is true, a set of directory paths to source
    files is returned in addition to the default dict.

    The `visited_iris` and `visited_paths` arguments are only intended for
    internal use to avoid infinite recursions.

    A ReadCatalogError is raised if the catalog file cannot be found.
    """
    # pylint: disable=too-many-branches

    # Protocols supported by urllib.request
    web_protocols = "http://", "https://", "ftp://"
    uri = str(uri)  # in case uri is a pathlib.Path object
    iris = visited_iris if visited_iris else {}
    dirs = visited_paths if visited_paths else set()
    if uri in iris:
        # Already visited — avoid infinite recursion between catalogs.
        return (iris, dirs) if return_paths else iris

    if uri.startswith(web_protocols):
        # Call read_catalog() recursively to ensure that the temporary
        # file is properly cleaned up
        with tempfile.TemporaryDirectory() as tmpdir:
            destfile = os.path.join(tmpdir, catalog_file)
            # Candidate download URLs, tried in order.
            uris = { # maps uri to base
                uri: (baseuri if baseuri else os.path.dirname(uri)),
                f'{uri.rstrip("/")}/{catalog_file}': (
                    baseuri if baseuri else uri.rstrip("/")
                ),
                f"{os.path.dirname(uri)}/{catalog_file}": (
                    os.path.dirname(uri)
                ),
            }
            for url, base in uris.items():
                try:
                    # The URL can only contain the schemes from `web_protocols`.
                    _, msg = urllib.request.urlretrieve(url, destfile) # nosec
                except urllib.request.URLError:
                    continue
                else:
                    if "Content-Length" not in msg:
                        # No Content-Length header — treat as a miss
                        # (e.g. a soft-404 page) and try the next URL.
                        continue

                    return read_catalog(
                        destfile,
                        catalog_file=catalog_file,
                        baseuri=baseuri if baseuri else base,
                        recursive=recursive,
                        return_paths=return_paths,
                        visited_iris=iris,
                        visited_paths=dirs,
                    )
            raise ReadCatalogError(
                "Cannot download catalog from URLs: " + ", ".join(uris)
            )
    elif uri.startswith("file://"):
        # Strip the "file://" scheme to get a plain file path.
        path = uri[7:]
    else:
        path = uri

    if os.path.isdir(path):
        dirname = os.path.abspath(path)
        filepath = os.path.join(dirname, catalog_file)
    else:
        catalog_file = os.path.basename(path)
        filepath = os.path.abspath(path)
        dirname = os.path.dirname(filepath)

    def gettag(entity):
        # Strip a possible XML namespace: "{ns}tag" -> "tag".
        return entity.tag.rsplit("}", 1)[-1]

    def load_catalog(filepath):
        # Parse one catalog XML file, populating `iris` and `dirs`.
        if not os.path.exists(filepath):
            raise ReadCatalogError("No such catalog file: " + filepath)
        dirname = os.path.normpath(os.path.dirname(filepath))
        dirs.add(baseuri if baseuri else dirname)
        xml = ET.parse(filepath)
        root = xml.getroot()
        if gettag(root) != "catalog":
            raise ReadCatalogError(
                f"expected root tag of catalog file {filepath!r} to be "
                '"catalog"'
            )
        # <uri> elements may appear directly or inside a <group>.
        for child in root:
            if gettag(child) == "uri":
                load_uri(child, dirname)
            elif gettag(child) == "group":
                for uri in child:
                    load_uri(uri, dirname)

    def load_uri(uri, dirname):
        # Handle a single <uri name="..." uri="..."/> element.
        if gettag(uri) != "uri":
            raise ValueError(f"{gettag(uri)!r} should be 'uri'.")
        uri_as_str = uri.attrib["uri"]
        if uri_as_str.startswith(web_protocols):
            url = uri_as_str
        else:
            # Resolve a relative location against baseuri or the
            # catalog file's own directory.
            uri_as_str = os.path.normpath(uri_as_str)
            if baseuri and baseuri.startswith(web_protocols):
                url = f"{baseuri}/{uri_as_str}"
            else:
                url = os.path.join(baseuri if baseuri else dirname, uri_as_str)

        # First occurrence of a name wins.
        iris.setdefault(uri.attrib["name"], url)
        if recursive:
            directory = os.path.dirname(url)
            if directory not in dirs:
                catalog = os.path.join(directory, catalog_file)
                if catalog.startswith(web_protocols):
                    iris_, dirs_ = read_catalog(
                        catalog,
                        catalog_file=catalog_file,
                        baseuri=None,
                        recursive=recursive,
                        return_paths=True,
                        visited_iris=iris,
                        visited_paths=dirs,
                    )
                    iris.update(iris_)
                    dirs.update(dirs_)
                else:
                    load_catalog(catalog)

    load_catalog(filepath)

    if relative_to:
        for iri, path in iris.items():
            iris[iri] = os.path.relpath(path, relative_to)

    if return_paths:
        return iris, dirs
    return iris
+
rename_iris(onto, annotation='prefLabel')
+
+
+¶For IRIs with the given annotation, change the name of the entity
+to the value of the annotation. Also add an skos:exactMatch
+annotation referring to the old IRI.
ontopy/utils.py
def rename_iris(onto, annotation="prefLabel"):
    """For entities that have the given annotation, change the name of
    the entity to the first value of that annotation.  An
    `skos:exactMatch` annotation referring to the old IRI is added so
    the original identity is preserved.
    """
    exact_match = onto._abbreviate(
        "http://www.w3.org/2004/02/skos/core#exactMatch"
    )
    for entity in onto.get_entities():
        values = getattr(entity, annotation, None)
        if values:
            # Record the old IRI before renaming.
            onto._add_data_triple_spod(
                entity.storid, exact_match, entity.iri, ""
            )
            entity.name = values.first()
+
write_catalog(irimap, output='catalog-v001.xml', directory='.', relative_paths=True, append=False)
+
+
+¶Write catalog file to disk.
+ +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
irimap |
+ dict |
+ dict mapping ontology IRIs (name) to actual locations +(URIs). It has the same format as the dict returned by +read_catalog(). |
+ required | +
output |
+ Union[str, Path] |
+ name of catalog file. |
+ 'catalog-v001.xml' |
+
directory |
+ Union[str, Path] |
+ directory path to the catalog file. Only used if |
+ '.' |
+
relative_paths |
+ bool |
+ whether to write file paths inside the catalog as +relative paths (instead of absolute paths). |
+ True |
+
append |
+ bool |
+ whether to append to a possible existing catalog file. +If false, an existing file will be overwritten. |
+ False |
+
ontopy/utils.py
def write_catalog(
    irimap: dict,
    output: "Union[str, Path]" = "catalog-v001.xml",
    directory: "Union[str, Path]" = ".",
    relative_paths: bool = True,
    append: bool = False,
):
    """Write catalog file to disk.

    Args:
        irimap: dict mapping ontology IRIs (name) to actual locations
            (URIs). It has the same format as the dict returned by
            read_catalog().
        output: name of catalog file.
        directory: directory path to the catalog file. Only used if
            `output` is a relative path.
        relative_paths: whether to write file paths inside the catalog as
            relative paths (instead of absolute paths).
        append: whether to append to a possible existing catalog file.
            If false, an existing file will be overwritten.
    """
    filename = Path(directory) / output

    if relative_paths:
        irimap = irimap.copy()  # don't modify provided irimap
        for iri, path in irimap.items():
            if os.path.isabs(path):
                irimap[iri] = os.path.relpath(path, filename.parent)

    if filename.exists() and append:
        # Existing entries are kept; entries in `irimap` take precedence.
        iris = read_catalog(filename)
        iris.update(irimap)
        irimap = iris

    res = [
        '<?xml version="1.0" encoding="UTF-8" standalone="no"?>',
        '<catalog prefer="public" '
        'xmlns="urn:oasis:names:tc:entity:xmlns:xml:catalog">',
        ' <group id="Folder Repository, directory=, recursive=true, '
        'Auto-Update=false, version=2" prefer="public" xml:base="">',
    ]
    # NOTE(review): IRIs and paths are written verbatim; values containing
    # XML-special characters (&, <, ") would produce invalid XML.
    for iri, path in irimap.items():
        res.append(f' <uri name="{iri}" uri="{path}"/>')
    res.append(" </group>")
    res.append("</catalog>")
    # Bug fix: the XML declaration promises UTF-8, so write with that
    # encoding explicitly instead of relying on the platform default.
    with open(filename, "wt", encoding="utf-8") as handle:
        handle.write("\n".join(res) + "\n")
+
{"use strict";/*!
+ * escape-html
+ * Copyright(c) 2012-2013 TJ Holowaychuk
+ * Copyright(c) 2015 Andreas Lubbe
+ * Copyright(c) 2015 Tiancheng "Timothy" Gu
+ * MIT Licensed
+ */var Wa=/["'&<>]/;Vn.exports=Ua;function Ua(e){var t=""+e,r=Wa.exec(t);if(!r)return t;var o,n="",i=0,s=0;for(i=r.index;i