diff --git a/physics_driven_ml/utils/__init__.py b/physics_driven_ml/utils/__init__.py
deleted file mode 100644
index 9993ae8..0000000
--- a/physics_driven_ml/utils/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from physics_driven_ml.utils.utils import ModelConfig, get_logger  # noqa: F401
diff --git a/physics_driven_ml/utils/graph/adjacency_dofs.pyx b/physics_driven_ml/utils/graph/adjacency_dofs.pyx
deleted file mode 100644
index c434227..0000000
--- a/physics_driven_ml/utils/graph/adjacency_dofs.pyx
+++ /dev/null
@@ -1,116 +0,0 @@
-# Which is faster: calling getAdjacency for every entity, or using a hashmap?
-
-# cython: language_level=3
-
-from firedrake.petsc import PETSc
-from firedrake.utils import IntType
-
-import numpy as np
-
-cimport cython
-cimport numpy as np
-cimport petsc4py.PETSc as PETSc
-
-include "petschdr.pxi"
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def build_dof_adjacency(ndofs, num_entities, dmplex, section):
-    cdef:
-        PetscInt i, j, k, n, e, ek, dof, dof_offset, start_ent, end_ent, start_dof, end_dof, size_entity_adj, max_adjacent_dofs
-        np.ndarray[PetscInt, ndim=1, mode="c"] number_adj_entities
-        np.ndarray[PetscInt, ndim=1, mode="c"] adj_entity_offset
-        np.ndarray[PetscInt, ndim=1, mode="c"] entity_per_dof_count
-        np.ndarray[PetscInt, ndim=1, mode="c"] dof_to_entity_offset
-        np.ndarray[PetscInt, ndim=1, mode="c"] adj_entities
-        np.ndarray[PetscInt, ndim=1, mode="c"] dofs_to_entities
-        np.ndarray[PetscInt, ndim=1, mode="c"] dofs_idx
-        np.ndarray[PetscInt, ndim=2, mode="c"] adjacency_dofs
-
-    # Number of adjacent entities of each entity
-    number_adj_entities = np.zeros(num_entities + 1, dtype=IntType)
-    # Offset of each entity in `adj_entities`
-    adj_entity_offset = np.empty(num_entities + 1, dtype=IntType)
-    # Number of entities associated with each dof
-    entity_per_dof_count = np.zeros(ndofs + 1, dtype=IntType)
-    # Offset of each dof in `dofs_to_entities`
-    dof_to_entity_offset = np.empty(ndofs + 1, dtype=IntType)
-    # Mapping from dof to entity
-    dofs_to_entities = np.empty(ndofs, dtype=IntType)
-    # Helper array to populate `dofs_to_entities`
-    dofs_idx = np.zeros(ndofs, dtype=IntType)
-    # Adjacency list of degrees of freedom
-    # -> the allocation assumes at most 100 adjacent dofs per dof
-    adjacency_dofs = np.empty((100 * ndofs, 2), dtype=IntType)
-
-
-    # Preprocessing loop: populate `number_adj_entities` and `entity_per_dof_count`
-    size_entity_adj = 0
-    for i in range(num_entities):
-        # Number of adjacent entities of entity i
-        number_adj_entities[i + 1] = dmplex.getAdjacency(i).size
-        size_entity_adj += number_adj_entities[i + 1]
-        # Number of dofs associated with entity i
-        dof_offset = section.getOffset(i) + 1
-        for n in range(section.getDof(i)):
-            entity_per_dof_count[dof_offset + n] += 1
-
-    # Compute offsets
-    dof_to_entity_offset = np.cumsum(entity_per_dof_count, dtype=IntType)
-    adj_entity_offset = np.cumsum(number_adj_entities, dtype=IntType)
-    # Drop the temporary count arrays: `entity_per_dof_count` and `number_adj_entities`
-    del entity_per_dof_count, number_adj_entities
-
-    # Adjacency list of entities
-    adj_entities = np.empty(size_entity_adj, dtype=IntType)
-
-    # Populate `adj_entities`, `dofs_to_entities`, and `dofs_idx`
-    for i in range(num_entities):
-        # Build adjacency list of entities
-        # adj_entities:
-        #                  entity i
-        #  | --------- | --------- | --------- |
-        #              ^           ^
-        #              |           |
-        #              a           b
-        # with `a = adj_entity_offset[i]` and `b = adj_entity_offset[i + 1]`
-        start_ent, end_ent = adj_entity_offset[i], adj_entity_offset[i + 1]
-        adj_i = dmplex.getAdjacency(i)
-        for j in range(start_ent, end_ent):
-            adj_entities[j] = adj_i[j - start_ent]
-        # Build mapping from dofs to entities
-        # dofs_to_entities:
-        #                   dof j
-        #  | --------- | --------- | --------- |
-        #              ^           ^
-        #              |           |
-        #              a           b
-        # with `a = dof_to_entity_offset[j]` and `b = dof_to_entity_offset[j + 1]`
-        start_dof = section.getOffset(i)
-        end_dof = start_dof + section.getDof(i)
-        for dof in range(start_dof, end_dof):
-            dof_offset = dof_to_entity_offset[dof]
-            dofs_to_entities[dof_offset + dofs_idx[dof]] = i
-            dofs_idx[dof] += 1
-
-    # Populate `adjacency_dofs`
-    max_adjacent_dofs = 0
-    for i in range(ndofs):
-        # Loop over entities related to dof `i`
-        for j in range(dof_to_entity_offset[i], dof_to_entity_offset[i + 1]):
-            e = dofs_to_entities[j]
-            # Loop over adjacent entities of `e`
-            start_ent, end_ent = adj_entity_offset[e], adj_entity_offset[e + 1]
-            for k in range(start_ent, end_ent):
-                ek = adj_entities[k]
-                # Loop over dofs associated with `ek`
-                start_dof = section.getOffset(ek)
-                end_dof = start_dof + section.getDof(ek)
-                for dof in range(start_dof, end_dof):
-                    # Keep each undirected edge exactly once by requiring `i >= dof`
-                    if i >= dof:
-                        adjacency_dofs[max_adjacent_dofs, 0] = dof
-                        adjacency_dofs[max_adjacent_dofs, 1] = i
-                        max_adjacent_dofs += 1
-
-    return adjacency_dofs[:max_adjacent_dofs, :]
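For reference, `build_dof_adjacency` returns an `(n_edges, 2)` integer array in which each undirected dof-dof edge is stored once (rows satisfy `dof <= i`). A minimal sketch of turning that edge list into a symmetric sparse adjacency matrix; SciPy and the helper name are assumptions, not part of the deleted code:

```python
import numpy as np
import scipy.sparse as sp

def edges_to_matrix(edges: np.ndarray, ndofs: int) -> sp.csr_matrix:
    """Turn the (n_edges, 2) array of (dof, i) pairs into a symmetric 0/1 matrix."""
    data = np.ones(len(edges), dtype=np.int8)
    adj = sp.coo_matrix((data, (edges[:, 0], edges[:, 1])), shape=(ndofs, ndofs)).tocsr()
    adj = adj + adj.T  # mirror: each undirected edge is stored only once above
    adj.data[:] = 1    # clamp diagonal entries (dof == i) counted twice by the mirror
    return adj
```

From there, conversion to the edge-index format used by graph libraries is a one-liner over `adj.nonzero()`.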
diff --git a/physics_driven_ml/utils/graph/mesh_to_graph.py b/physics_driven_ml/utils/graph/mesh_to_graph.py
deleted file mode 100644
index 90ae201..0000000
--- a/physics_driven_ml/utils/graph/mesh_to_graph.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from physics_driven_ml.utils.graph.adjacency_dofs import build_dof_adjacency
-
-
-def build_adjacency(mesh, V):
-    dmplex = mesh.topology_dm
-    section = V.dm.getLocalSection()
-    # Number of dofs in the function space
-    ndofs = V.dof_count
-    # Geometric dimension of the mesh (entities are stratified by dimension)
-    dimension = mesh.geometric_dimension()
-    # Total number of mesh entities, summed over dimensions 0..dimension
-    num_entities = sum(mesh.num_entities(d) for d in range(dimension + 1))
-    return build_dof_adjacency(ndofs, num_entities, dmplex, section)
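A hypothetical end-to-end call, assuming a working Firedrake installation and that the Cython extension above has been compiled; the mesh and function space are illustrative:

```python
from firedrake import UnitSquareMesh, FunctionSpace
from physics_driven_ml.utils.graph.mesh_to_graph import build_adjacency

mesh = UnitSquareMesh(8, 8)
V = FunctionSpace(mesh, "CG", 1)  # one dof per mesh vertex

edges = build_adjacency(mesh, V)  # (n_edges, 2) array of dof pairs
assert edges.max() < V.dof_count
```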
diff --git a/physics_driven_ml/utils/graph/mpi-compat.h b/physics_driven_ml/utils/graph/mpi-compat.h
deleted file mode 100644
index 9a604f8..0000000
--- a/physics_driven_ml/utils/graph/mpi-compat.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* Author: Lisandro Dalcin */
-/* Contact: dalcinl@gmail.com */
-
-#ifndef MPI_COMPAT_H
-#define MPI_COMPAT_H
-
-#include <mpi.h>
-
-#if (MPI_VERSION < 3) && !defined(PyMPI_HAVE_MPI_Message)
-typedef void *PyMPI_MPI_Message;
-#define MPI_Message PyMPI_MPI_Message
-#endif
-
-#if (MPI_VERSION < 4) && !defined(PyMPI_HAVE_MPI_Session)
-typedef void *PyMPI_MPI_Session;
-#define MPI_Session PyMPI_MPI_Session
-#endif
-
-#endif/*MPI_COMPAT_H*/
diff --git a/physics_driven_ml/utils/graph/petschdr.pxi b/physics_driven_ml/utils/graph/petschdr.pxi
deleted file mode 100644
index d956a01..0000000
--- a/physics_driven_ml/utils/graph/petschdr.pxi
+++ /dev/null
@@ -1,185 +0,0 @@
-cimport petsc4py.PETSc as PETSc
-cimport mpi4py.MPI as MPI
-cimport numpy as np
-
-cdef extern from "mpi-compat.h":
-    pass
-
-#IF COMPLEX:
-#    ctypedef np.complex128_t PetscScalar
-#ELSE:
-ctypedef double PetscScalar
-
-cdef extern from "petsc.h":
-    ctypedef long PetscInt
-    ctypedef double PetscReal
-    ctypedef enum PetscBool:
-        PETSC_TRUE, PETSC_FALSE
-    ctypedef enum PetscCopyMode:
-        PETSC_COPY_VALUES,
-        PETSC_OWN_POINTER,
-        PETSC_USE_POINTER
-    ctypedef enum PetscDataType:
-        PETSC_INT,
-        PETSC_REAL,
-        PETSC_SCALAR,
-        #PETSC_COMPLEX,
-        PETSC_DATATYPE_UNKNOWN
-
-cdef extern from "petscsys.h" nogil:
-    int PetscMalloc1(PetscInt,void*)
-    int PetscMalloc2(PetscInt,void*,PetscInt,void*)
-    int PetscFree(void*)
-    int PetscFree2(void*,void*)
-    int PetscSortIntWithArray(PetscInt,PetscInt[],PetscInt[])
-
-cdef extern from "petscdmplex.h" nogil:
-    int DMPlexGetHeightStratum(PETSc.PetscDM,PetscInt,PetscInt*,PetscInt*)
-    int DMPlexGetDepthStratum(PETSc.PetscDM,PetscInt,PetscInt*,PetscInt*)
-    int DMPlexGetPointHeight(PETSc.PetscDM,PetscInt,PetscInt*)
-    int DMPlexGetPointDepth(PETSc.PetscDM,PetscInt,PetscInt*)
-
-    int DMPlexGetChart(PETSc.PetscDM,PetscInt*,PetscInt*)
-    int DMPlexGetConeSize(PETSc.PetscDM,PetscInt,PetscInt*)
-    int DMPlexGetCone(PETSc.PetscDM,PetscInt,PetscInt*[])
-    int DMPlexGetConeOrientation(PETSc.PetscDM,PetscInt,PetscInt*[])
-    int DMPlexGetSupportSize(PETSc.PetscDM,PetscInt,PetscInt*)
-    int DMPlexGetSupport(PETSc.PetscDM,PetscInt,PetscInt*[])
-    int DMPlexGetMaxSizes(PETSc.PetscDM,PetscInt*,PetscInt*)
-
-    int DMPlexGetTransitiveClosure(PETSc.PetscDM,PetscInt,PetscBool,PetscInt *,PetscInt *[])
-    int DMPlexRestoreTransitiveClosure(PETSc.PetscDM,PetscInt,PetscBool,PetscInt *,PetscInt *[])
-    int DMPlexDistributeData(PETSc.PetscDM,PETSc.PetscSF,PETSc.PetscSection,MPI.MPI_Datatype,void*,PETSc.PetscSection,void**)
-    int DMPlexSetAdjacencyUser(PETSc.PetscDM,int(*)(PETSc.PetscDM,PetscInt,PetscInt*,PetscInt[],void*),void*)
-    int DMPlexCreatePointNumbering(PETSc.PetscDM,PETSc.PetscIS*)
-    int DMPlexLabelComplete(PETSc.PetscDM, PETSc.PetscDMLabel)
-
-cdef extern from "petscdmlabel.h" nogil:
-    struct _n_DMLabel
-    ctypedef _n_DMLabel* DMLabel "DMLabel"
-    int DMLabelCreateIndex(DMLabel, PetscInt, PetscInt)
-    int DMLabelDestroyIndex(DMLabel)
-    int DMLabelDestroy(DMLabel*)
-    int DMLabelHasPoint(DMLabel, PetscInt, PetscBool*)
-    int DMLabelSetValue(DMLabel, PetscInt, PetscInt)
-    int DMLabelGetValue(DMLabel, PetscInt, PetscInt*)
-    int DMLabelClearValue(DMLabel, PetscInt, PetscInt)
-    int DMLabelGetStratumSize(DMLabel, PetscInt, PetscInt*)
-    int DMLabelGetStratumIS(DMLabel, PetscInt, PETSc.PetscIS*)
-
-cdef extern from "petscdm.h" nogil:
-    int DMCreateLabel(PETSc.PetscDM,char[])
-    int DMGetLabel(PETSc.PetscDM,char[],DMLabel*)
-    int DMGetPointSF(PETSc.PetscDM,PETSc.PetscSF*)
-
-cdef extern from "petscdmswarm.h" nogil:
-    int DMSwarmGetLocalSize(PETSc.PetscDM,PetscInt*)
-    int DMSwarmGetCellDM(PETSc.PetscDM, PETSc.PetscDM*)
-    int DMSwarmGetField(PETSc.PetscDM,const char[],PetscInt*,PetscDataType*,void**)
-    int DMSwarmRestoreField(PETSc.PetscDM,const char[],PetscInt*,PetscDataType*,void**)
-
-cdef extern from "petscvec.h" nogil:
-    int VecGetArray(PETSc.PetscVec,PetscScalar**)
-    int VecRestoreArray(PETSc.PetscVec,PetscScalar**)
-    int VecGetArrayRead(PETSc.PetscVec,const PetscScalar**)
-    int VecRestoreArrayRead(PETSc.PetscVec,const PetscScalar**)
-
-cdef extern from "petscis.h" nogil:
-    int PetscSectionGetOffset(PETSc.PetscSection,PetscInt,PetscInt*)
-    int PetscSectionGetDof(PETSc.PetscSection,PetscInt,PetscInt*)
-    int PetscSectionSetDof(PETSc.PetscSection,PetscInt,PetscInt)
-    int PetscSectionSetFieldDof(PETSc.PetscSection,PetscInt,PetscInt,PetscInt)
-    int PetscSectionGetFieldDof(PETSc.PetscSection,PetscInt,PetscInt,PetscInt*)
-    int PetscSectionGetMaxDof(PETSc.PetscSection,PetscInt*)
-    int PetscSectionSetPermutation(PETSc.PetscSection,PETSc.PetscIS)
-    int ISGetIndices(PETSc.PetscIS,PetscInt*[])
-    int ISGetSize(PETSc.PetscIS,PetscInt*)
-    int ISRestoreIndices(PETSc.PetscIS,PetscInt*[])
-    int ISGeneralSetIndices(PETSc.PetscIS,PetscInt,PetscInt[],PetscCopyMode)
-    int ISLocalToGlobalMappingCreateIS(PETSc.PetscIS,PETSc.PetscLGMap*)
-    int ISLocalToGlobalMappingGetSize(PETSc.PetscLGMap,PetscInt*)
-    int ISLocalToGlobalMappingGetBlockIndices(PETSc.PetscLGMap, const PetscInt**)
-    int ISLocalToGlobalMappingRestoreBlockIndices(PETSc.PetscLGMap, const PetscInt**)
-    int ISDestroy(PETSc.PetscIS*)
-
-cdef extern from "petscsf.h" nogil:
-    struct PetscSFNode_:
-        PetscInt rank
-        PetscInt index
-    ctypedef PetscSFNode_ PetscSFNode "PetscSFNode"
-
-    int PetscSFGetGraph(PETSc.PetscSF,PetscInt*,PetscInt*,PetscInt**,PetscSFNode**)
-    int PetscSFSetGraph(PETSc.PetscSF,PetscInt,PetscInt,PetscInt*,PetscCopyMode,PetscSFNode*,PetscCopyMode)
-    int PetscSFBcastBegin(PETSc.PetscSF,MPI.MPI_Datatype,const void*, void*)
-    int PetscSFBcastEnd(PETSc.PetscSF,MPI.MPI_Datatype,const void*, void*)
-    int PetscSFReduceBegin(PETSc.PetscSF,MPI.MPI_Datatype,const void*, void*,MPI.MPI_Op)
-    int PetscSFReduceEnd(PETSc.PetscSF,MPI.MPI_Datatype,const void*, void*,MPI.MPI_Op)
-
-ctypedef int (*PetscPCPatchComputeFunction)(PETSc.PetscPC,
-                                            PetscInt,
-                                            PETSc.PetscVec,
-                                            PETSc.PetscVec,
-                                            PETSc.PetscIS,
-                                            PetscInt,
-                                            const PetscInt*,
-                                            const PetscInt*,
-                                            void*)
-ctypedef int (*PetscPCPatchComputeOperator)(PETSc.PetscPC,
-                                            PetscInt,
-                                            PETSc.PetscVec,
-                                            PETSc.PetscMat,
-                                            PETSc.PetscIS,
-                                            PetscInt,
-                                            const PetscInt*,
-                                            const PetscInt*,
-                                            void*)
-cdef extern from "petscsnes.h" nogil:
-    int SNESPatchSetComputeFunction(PETSc.PetscSNES, PetscPCPatchComputeFunction, void *)
-    int SNESPatchSetComputeOperator(PETSc.PetscSNES, PetscPCPatchComputeOperator, void *)
-
-cdef extern from "petscpc.h" nogil:
-    int PCPatchSetComputeFunction(PETSc.PetscPC, PetscPCPatchComputeFunction, void *)
-    int PCPatchSetComputeFunctionInteriorFacets(PETSc.PetscPC, PetscPCPatchComputeFunction, void *)
-    int PCPatchSetComputeOperator(PETSc.PetscPC, PetscPCPatchComputeOperator, void *)
-    int PCPatchSetComputeOperatorInteriorFacets(PETSc.PetscPC, PetscPCPatchComputeOperator, void *)
-
-cdef extern from "petscbt.h" nogil:
-    ctypedef char * PetscBT
-    int PetscBTCreate(PetscInt,PetscBT*)
-    int PetscBTDestroy(PetscBT*)
-    char PetscBTLookup(PetscBT,PetscInt)
-    int PetscBTSet(PetscBT,PetscInt)
-
-cdef extern from "petscmat.h" nogil:
-    int MatSetValuesLocal(PETSc.PetscMat, PetscInt, const PetscInt[], PetscInt, const PetscInt[],
-                          const PetscScalar[], PetscInt)
-    int MatAssemblyBegin(PETSc.PetscMat, PetscInt)
-    int MatAssemblyEnd(PETSc.PetscMat, PetscInt)
-    PetscInt MAT_FINAL_ASSEMBLY = 0
-
-cdef extern from * nogil:
-    int PetscObjectTypeCompare(PETSc.PetscObject, char[], PetscBool*)
-
-# --- Error handling taken from petsc4py (src/PETSc.pyx) -------------
-
-cdef extern from *:
-    void PyErr_SetObject(object, object)
-    void *PyExc_RuntimeError
-
-cdef object PetscError = <object>PyExc_RuntimeError
-
-cdef inline int SETERR(int ierr) with gil:
-    if (<void*>PetscError) != NULL:
-        PyErr_SetObject(PetscError, <long>ierr)
-    else:
-        PyErr_SetObject(<object>PyExc_RuntimeError, <long>ierr)
-    return ierr
-
-cdef inline int CHKERR(int ierr) nogil except -1:
-    if ierr == 0:
-        return 0  # no error
-    else:
-        SETERR(ierr)
-        return -1
-
-# --------------------------------------------------------------------
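The C routines declared above are the low-level counterparts of petsc4py methods that the package's pure-Python code calls directly. A rough illustration of that correspondence, assuming petsc4py is available via `firedrake.petsc`; `dump_section` is a made-up helper, not part of the deleted code:

```python
from firedrake.petsc import PETSc  # re-exports petsc4py's PETSc

def dump_section(dmplex, section):
    # Python-level analogues of DMPlexGetChart, PetscSectionGetDof
    # and PetscSectionGetOffset from the declarations above.
    p_start, p_end = dmplex.getChart()
    for p in range(p_start, p_end):
        ndof = section.getDof(p)
        if ndof > 0:
            print(f"point {p}: {ndof} dof(s) at offset {section.getOffset(p)}")
```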
diff --git a/physics_driven_ml/utils/utils.py b/physics_driven_ml/utils/utils.py
deleted file mode 100644
index a731c85..0000000
--- a/physics_driven_ml/utils/utils.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import sys
-import json
-import logging
-from dataclasses import dataclass
-
-
-@dataclass
-class ModelConfig:
-
-    # Directories
-    data_dir: str = ""
-    model_dir: str = ""
-    model_version: str = ""
-
-    # Model architecture
-    model: str = "encoder-decoder"
-    input_shape: int = 1
-    dropout: float = 0.0
-    device: str = "cpu"
-
-    # Evaluation
-    eval_set: str = ""
-    max_eval_steps: int = 5000
-
-    # Dataset
-    dataset: str = "heat_conductivity"
-
-    # Optimisation
-    alpha: float = 1e-3
-    epochs: int = 100
-    batch_size: int = 1
-    learning_rate: float = 1e-3
-    evaluation_metric: str = "L2"
-
-    def __post_init__(self):
-
-        assert self.model in {"encoder-decoder", "cnn"}
-
-        if self.batch_size != 1:
-            # Batching could be implemented using Firedrake ensemble parallelism, which is
-            # critical when the Firedrake operator composed with PyTorch is expensive to evaluate, e.g. when it solves a PDE.
-            raise NotImplementedError("Batch size > 1 necessitates using Firedrake ensemble parallelism. See https://www.firedrakeproject.org/parallelism.html#ensemble-parallelism")
-
-    def add_input_shape(self, input_shape: int):
-        self.input_shape = input_shape
-
-    @classmethod
-    def from_file(cls, filepath: str):
-        with open(filepath, "r") as f:
-            cfg = json.load(f)
-        return cls(**cfg)
-
-    def to_file(self, filename: str):
-        with open(filename, "w") as f:
-            json.dump(self.__dict__, f)
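A small round-trip sketch for `ModelConfig`, using only the methods defined above; the JSON path is made up:

```python
cfg = ModelConfig(model="cnn", epochs=50)
cfg.add_input_shape(64)
cfg.to_file("config.json")                      # dataclass fields -> JSON
restored = ModelConfig.from_file("config.json")
assert restored.input_shape == 64
```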
-
-
-def get_logger(name: str = "main"):
-
-    logger = logging.getLogger(name)
-    logger.setLevel(logging.INFO)
-
-    # Only attach a handler once, so that repeated calls with the
-    # same name do not produce duplicate log lines
-    if not logger.handlers:
-        handler = logging.StreamHandler(sys.stdout)
-        handler.setLevel(logging.INFO)
-
-        formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s", datefmt="%H:%M:%S")
-        handler.setFormatter(formatter)
-
-        logger.addHandler(handler)
-
-    return logger
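And a short, illustrative use of `get_logger` together with the config above:

```python
logger = get_logger("training")
logger.info("dataset=%s, epochs=%d", cfg.dataset, cfg.epochs)
# A second get_logger("training") call returns the same logger object
# without attaching a duplicate handler.
```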