diff --git a/.github/workflows/test_warpmesh.yml b/.github/workflows/test_suite.yml
similarity index 92%
rename from .github/workflows/test_warpmesh.yml
rename to .github/workflows/test_suite.yml
index 9708078..b6198e4 100644
--- a/.github/workflows/test_warpmesh.yml
+++ b/.github/workflows/test_suite.yml
@@ -1,4 +1,4 @@
-name: Install and Test WarpMesh
+name: Install and test UM2N

 on:
   push:
@@ -9,7 +9,7 @@ on:

 jobs:
   test-warpmesh:
-    name: Test WarpMesh
+    name: Test UM2N
     runs-on: ubuntu-latest
     container:
       image: firedrakeproject/firedrake:latest
@@ -52,12 +52,12 @@ jobs:
          . /home/firedrake/firedrake/bin/activate
          python3 -m pip install 'git+https://github.com/facebookresearch/pytorch3d.git'

-      - name: Install WarpMesh
+      - name: Install UM2N
        run: |
          . /home/firedrake/firedrake/bin/activate
          python3 -m pip install -e .

-      - name: Run WarpMesh test suite
+      - name: Run UM2N test suite
        run: |
          . /home/firedrake/firedrake/bin/activate
          python3 -m pytest tests/test* -v
diff --git a/README.md b/README.md
index 133a234..e299171 100644
--- a/README.md
+++ b/README.md
@@ -17,7 +17,7 @@ Solving complex Partial Differential Equations (PDEs) accurately and efficiently

 The latest test status:

-[![UM2N](https://github.com/mesh-adaptation/UM2N/actions/workflows/test_warpmesh.yml/badge.svg)](https://github.com/mesh-adaptation/UM2N/actions/workflows/test_warpmesh.yml)
+[![UM2N](https://github.com/mesh-adaptation/UM2N/actions/workflows/test_suite.yml/badge.svg)](https://github.com/mesh-adaptation/UM2N/actions/workflows/test_suite.yml)

 ## 🛠️ Installation

@@ -31,7 +31,7 @@ Just navigate to **project root** folder, open terminal and execute the
 ```
 This will install [Firedrake](https://www.firedrakeproject.org/download.html) and [Movement](https://github.com/mesh-adaptation/movement) under the `install`
-folder, as well as the `WarpMesh` package. Note that the pytorch installed is a cpu version.
+folder, as well as the `UM2N` package. Note that the pytorch installed is a cpu version.

 - GPU (cuda) support

@@ -114,7 +114,7 @@ The number of samples in the dataset can be changed by modifying the variable

 ## 🚀 Train the model (This is outdated)

-A training notebook is provided: `script/train_warpmesh.ipynb`. Further training
+A training notebook is provided: `script/train_um2n.ipynb`. Further training
 details can be found in the notebook.

 Here is also a link to pre-trained models:
@@ -136,7 +136,7 @@ The documentation is generated by Sphinx.
 To build the documentation, under the
 ## 🧩 Project Layout

 ```
-├── warpmesh (Implementation of the project)
+├── UM2N (Implementation of the project)
 │   ├── __init__.py
 │   ├── generator (Dataset generator)
 │   ├── processor (Data processor)
diff --git a/warpmesh/__init__.py b/UM2N/__init__.py
similarity index 100%
rename from warpmesh/__init__.py
rename to UM2N/__init__.py
diff --git a/warpmesh/generator/README.md b/UM2N/generator/README.md
similarity index 100%
rename from warpmesh/generator/README.md
rename to UM2N/generator/README.md
diff --git a/warpmesh/generator/__init__.py b/UM2N/generator/__init__.py
similarity index 100%
rename from warpmesh/generator/__init__.py
rename to UM2N/generator/__init__.py
diff --git a/warpmesh/generator/burgers_solver.py b/UM2N/generator/burgers_solver.py
similarity index 99%
rename from warpmesh/generator/burgers_solver.py
rename to UM2N/generator/burgers_solver.py
index 2fbd907..ca0b77e 100644
--- a/warpmesh/generator/burgers_solver.py
+++ b/UM2N/generator/burgers_solver.py
@@ -9,8 +9,6 @@
 import movement as mv
 import numpy as np  # noqa

-import warpmesh as wm  # noqa
-
 __all__ = ["BurgersSolver"]
diff --git a/warpmesh/generator/equation_generator.py b/UM2N/generator/equation_generator.py
similarity index 100%
rename from warpmesh/generator/equation_generator.py
rename to UM2N/generator/equation_generator.py
diff --git a/warpmesh/generator/equation_solver.py b/UM2N/generator/equation_solver.py
similarity index 100%
rename from warpmesh/generator/equation_solver.py
rename to UM2N/generator/equation_solver.py
diff --git a/warpmesh/generator/mesh_generator.py b/UM2N/generator/mesh_generator.py
similarity index 99%
rename from warpmesh/generator/mesh_generator.py
rename to UM2N/generator/mesh_generator.py
index 2470132..3c50d4e 100644
--- a/warpmesh/generator/mesh_generator.py
+++ b/UM2N/generator/mesh_generator.py
@@ -3,7 +3,7 @@
 import firedrake as fd
 import movement as mv

-from warpmesh.generator.equation_solver import EquationSolver
+from UM2N.generator.equation_solver import EquationSolver

 os.environ["OMP_NUM_THREADS"] = "1"
 __all__ = ["MeshGenerator"]
diff --git a/warpmesh/generator/polymesh.py b/UM2N/generator/polymesh.py
similarity index 100%
rename from warpmesh/generator/polymesh.py
rename to UM2N/generator/polymesh.py
diff --git a/warpmesh/generator/rand_source_generator.py b/UM2N/generator/rand_source_generator.py
similarity index 100%
rename from warpmesh/generator/rand_source_generator.py
rename to UM2N/generator/rand_source_generator.py
diff --git a/warpmesh/generator/squaremesh.py b/UM2N/generator/squaremesh.py
similarity index 100%
rename from warpmesh/generator/squaremesh.py
rename to UM2N/generator/squaremesh.py
diff --git a/warpmesh/generator/swirl_demo.py b/UM2N/generator/swirl_demo.py
similarity index 100%
rename from warpmesh/generator/swirl_demo.py
rename to UM2N/generator/swirl_demo.py
diff --git a/warpmesh/generator/swirl_solver.py b/UM2N/generator/swirl_solver.py
similarity index 98%
rename from warpmesh/generator/swirl_solver.py
rename to UM2N/generator/swirl_solver.py
index 21c8a97..06b4bb3 100644
--- a/warpmesh/generator/swirl_solver.py
+++ b/UM2N/generator/swirl_solver.py
@@ -8,7 +8,7 @@
 import firedrake as fd  # noqa
 import torch
 import movement as mv  # noqa
-import warpmesh as wm  # noqa
+import UM2N
 import numpy as np
 import pandas as pd
@@ -20,7 +20,7 @@
 import matplotlib.pyplot as plt  # noqa
 from tqdm import tqdm  # noqa

-from warpmesh.model.train_util import model_forward
+from UM2N.model.train_util import model_forward


 def get_log_og(log_path, idx):
@@ -826,7 +826,7 @@ def eval_problem(
             # self.u_prev_adapt.project(self.u_cur)

             # check mesh integrity - Only perform evaluation on non-tangling mesh  # noqa
-            num_tangle = wm.get_sample_tangle(out, sample.x[:, :2], sample.face)  # noqa
+            num_tangle = UM2N.get_sample_tangle(out, sample.x[:, :2], sample.face)  # noqa
             if isinstance(num_tangle, torch.Tensor):
                 num_tangle = num_tangle.item()
             if num_tangle > 0:  # has tangled elems:
@@ -859,7 +859,7 @@ def eval_problem(
             self.mesh_new.coordinates.dat.data[:] = y

             if ((step + 1) % self.save_interval == 0) or (step == 0):
-                fig, plot_data_dict = wm.plot_compare(
+                fig, plot_data_dict = UM2N.plot_compare(
                     self.mesh_fine,
                     self.mesh,
                     self.mesh_new,
@@ -943,16 +943,16 @@ def make_all_dirs(self, log_path, plot_path, plot_more_path, plot_data_path):
         self.make_plot_data_dir(plot_data_path)

     def make_log_dir(self, log_path):
-        wm.mkdir_if_not_exist(log_path)
+        UM2N.mkdir_if_not_exist(log_path)

     def make_plot_dir(self, plot_path):
-        wm.mkdir_if_not_exist(plot_path)
+        UM2N.mkdir_if_not_exist(plot_path)

     def make_plot_more_dir(self, plot_more_path):
-        wm.mkdir_if_not_exist(plot_more_path)
+        UM2N.mkdir_if_not_exist(plot_more_path)

     def make_plot_data_dir(self, plot_data_path):
-        wm.mkdir_if_not_exist(plot_data_path)
+        UM2N.mkdir_if_not_exist(plot_data_path)


 if __name__ == "__main__":
diff --git a/warpmesh/generator/swirl_solver_step.py b/UM2N/generator/swirl_solver_step.py
similarity index 99%
rename from warpmesh/generator/swirl_solver_step.py
rename to UM2N/generator/swirl_solver_step.py
index b9921dd..96aba99 100644
--- a/warpmesh/generator/swirl_solver_step.py
+++ b/UM2N/generator/swirl_solver_step.py
@@ -7,7 +7,6 @@
 import firedrake as fd  # noqa
 import movement as mv  # noqa
-import warpmesh as wm  # noqa
 import numpy as np
 import firedrake.function as ffunc
diff --git a/warpmesh/helper/__init__.py b/UM2N/helper/__init__.py
similarity index 100%
rename from warpmesh/helper/__init__.py
rename to UM2N/helper/__init__.py
diff --git a/warpmesh/helper/config.py b/UM2N/helper/config.py
similarity index 100%
rename from warpmesh/helper/config.py
rename to UM2N/helper/config.py
diff --git a/warpmesh/helper/helper.py b/UM2N/helper/helper.py
similarity index 100%
rename from warpmesh/helper/helper.py
rename to UM2N/helper/helper.py
diff --git a/warpmesh/helper/plot_util.py b/UM2N/helper/plot_util.py
similarity index 100%
rename from warpmesh/helper/plot_util.py
rename to UM2N/helper/plot_util.py
diff --git a/warpmesh/helper/plotting.py b/UM2N/helper/plotting.py
similarity index 100%
rename from warpmesh/helper/plotting.py
rename to UM2N/helper/plotting.py
diff --git a/warpmesh/loader/__init__.py b/UM2N/loader/__init__.py
similarity index 100%
rename from warpmesh/loader/__init__.py
rename to UM2N/loader/__init__.py
diff --git a/warpmesh/loader/cluster_utils.py b/UM2N/loader/cluster_utils.py
similarity index 100%
rename from warpmesh/loader/cluster_utils.py
rename to UM2N/loader/cluster_utils.py
diff --git a/warpmesh/loader/data_transform.py b/UM2N/loader/data_transform.py
similarity index 100%
rename from warpmesh/loader/data_transform.py
rename to UM2N/loader/data_transform.py
diff --git a/warpmesh/loader/dataset.py b/UM2N/loader/dataset.py
similarity index 100%
rename from warpmesh/loader/dataset.py
rename to UM2N/loader/dataset.py
diff --git a/warpmesh/model/M2N.py b/UM2N/model/M2N.py
similarity index 100%
rename from warpmesh/model/M2N.py
rename to UM2N/model/M2N.py
diff --git a/warpmesh/model/M2N_T.py b/UM2N/model/M2N_T.py
similarity index 100%
rename from warpmesh/model/M2N_T.py
rename to UM2N/model/M2N_T.py
diff --git a/warpmesh/model/M2N_atten.py b/UM2N/model/M2N_atten.py
similarity index 100%
rename from warpmesh/model/M2N_atten.py
rename to UM2N/model/M2N_atten.py
diff --git a/warpmesh/model/M2N_dynamic_drop.py b/UM2N/model/M2N_dynamic_drop.py
similarity index 100%
rename from warpmesh/model/M2N_dynamic_drop.py
rename to UM2N/model/M2N_dynamic_drop.py
diff --git a/warpmesh/model/M2N_dynamic_no_drop.py b/UM2N/model/M2N_dynamic_no_drop.py
similarity index 100%
rename from warpmesh/model/M2N_dynamic_no_drop.py
rename to UM2N/model/M2N_dynamic_no_drop.py
diff --git a/warpmesh/model/M2T.py b/UM2N/model/M2T.py
similarity index 100%
rename from warpmesh/model/M2T.py
rename to UM2N/model/M2T.py
diff --git a/warpmesh/model/M2T_deformer.py b/UM2N/model/M2T_deformer.py
similarity index 100%
rename from warpmesh/model/M2T_deformer.py
rename to UM2N/model/M2T_deformer.py
diff --git a/warpmesh/model/MRN.py b/UM2N/model/MRN.py
similarity index 100%
rename from warpmesh/model/MRN.py
rename to UM2N/model/MRN.py
diff --git a/warpmesh/model/MRN_GTE.py b/UM2N/model/MRN_GTE.py
similarity index 100%
rename from warpmesh/model/MRN_GTE.py
rename to UM2N/model/MRN_GTE.py
diff --git a/warpmesh/model/MRN_LTE.py b/UM2N/model/MRN_LTE.py
similarity index 100%
rename from warpmesh/model/MRN_LTE.py
rename to UM2N/model/MRN_LTE.py
diff --git a/warpmesh/model/MRN_atten.py b/UM2N/model/MRN_atten.py
similarity index 100%
rename from warpmesh/model/MRN_atten.py
rename to UM2N/model/MRN_atten.py
diff --git a/warpmesh/model/MRN_fix.py b/UM2N/model/MRN_fix.py
similarity index 100%
rename from warpmesh/model/MRN_fix.py
rename to UM2N/model/MRN_fix.py
diff --git a/warpmesh/model/MRN_phi.py b/UM2N/model/MRN_phi.py
similarity index 100%
rename from warpmesh/model/MRN_phi.py
rename to UM2N/model/MRN_phi.py
diff --git a/warpmesh/model/MRT.py b/UM2N/model/MRT.py
similarity index 100%
rename from warpmesh/model/MRT.py
rename to UM2N/model/MRT.py
diff --git a/warpmesh/model/MRT_PE.py b/UM2N/model/MRT_PE.py
similarity index 100%
rename from warpmesh/model/MRT_PE.py
rename to UM2N/model/MRT_PE.py
diff --git a/warpmesh/model/MRT_phi.py b/UM2N/model/MRT_phi.py
similarity index 100%
rename from warpmesh/model/MRT_phi.py
rename to UM2N/model/MRT_phi.py
diff --git a/warpmesh/model/__init__.py b/UM2N/model/__init__.py
similarity index 100%
rename from warpmesh/model/__init__.py
rename to UM2N/model/__init__.py
diff --git a/warpmesh/model/deformer.py b/UM2N/model/deformer.py
similarity index 100%
rename from warpmesh/model/deformer.py
rename to UM2N/model/deformer.py
diff --git a/warpmesh/model/extractor.py b/UM2N/model/extractor.py
similarity index 100%
rename from warpmesh/model/extractor.py
rename to UM2N/model/extractor.py
diff --git a/warpmesh/model/gatdeformer.py b/UM2N/model/gatdeformer.py
similarity index 100%
rename from warpmesh/model/gatdeformer.py
rename to UM2N/model/gatdeformer.py
diff --git a/warpmesh/model/train_util.py b/UM2N/model/train_util.py
similarity index 100%
rename from warpmesh/model/train_util.py
rename to UM2N/model/train_util.py
diff --git a/warpmesh/model/transformer_model.py b/UM2N/model/transformer_model.py
similarity index 100%
rename from warpmesh/model/transformer_model.py
rename to UM2N/model/transformer_model.py
diff --git a/warpmesh/processor/__init__.py b/UM2N/processor/__init__.py
similarity index 100%
rename from warpmesh/processor/__init__.py
rename to UM2N/processor/__init__.py
diff --git a/warpmesh/processor/processor.py b/UM2N/processor/processor.py
similarity index 100%
rename from warpmesh/processor/processor.py
rename to UM2N/processor/processor.py
diff --git a/warpmesh/test/__init__.py b/UM2N/test/__init__.py
similarity index 100%
rename from warpmesh/test/__init__.py
rename to UM2N/test/__init__.py
diff --git a/warpmesh/test/bench_burgers.py b/UM2N/test/bench_burgers.py
similarity index 97%
rename from warpmesh/test/bench_burgers.py
rename to UM2N/test/bench_burgers.py
index 1eff2eb..a96c5dd 100644
--- a/warpmesh/test/bench_burgers.py
+++ b/UM2N/test/bench_burgers.py
@@ -13,8 +13,8 @@
 import torch  # noqa
 from torch_geometric.loader import DataLoader

-import warpmesh as wm  # noqa
-from warpmesh.model.train_util import generate_samples
+import UM2N
+from UM2N.model.train_util import generate_samples


 def get_log_og(log_path, idx):
@@ -302,7 +302,7 @@ def eval_problem(self):
         dur_ms = (end - start) * 1000

         # check mesh integrity - Only perform evaluation on non-tangling mesh  # noqa
-        num_tangle = wm.get_sample_tangle(out, sample.x[:, :2], sample.face)  # noqa
+        num_tangle = UM2N.get_sample_tangle(out, sample.x[:, :2], sample.face)  # noqa
         if isinstance(num_tangle, torch.Tensor):
             num_tangle = num_tangle.item()
         if num_tangle > 0:  # has tangled elems:
@@ -368,7 +368,7 @@ def eval_problem(self):
         df.to_csv(os.path.join(self.log_path, f"log{self.idx}_{cur_step}.csv"))  # noqa

         # plot compare mesh
-        compare_plot = wm.plot_mesh_compare_benchmark(
+        compare_plot = UM2N.plot_mesh_compare_benchmark(
             out.detach().cpu().numpy(),
             sample.y.detach().cpu().numpy(),
             sample.face.detach().cpu().numpy(),
@@ -485,13 +485,13 @@ def get_error(self):
         return error_og, error_adapt

     def make_log_dir(self):
-        wm.mkdir_if_not_exist(self.log_path)
+        UM2N.mkdir_if_not_exist(self.log_path)

     def make_plot_dir(self):
-        wm.mkdir_if_not_exist(self.plot_path)
+        UM2N.mkdir_if_not_exist(self.plot_path)

     def make_plot_more_dir(self):
-        wm.mkdir_if_not_exist(self.plot_more_path)
+        UM2N.mkdir_if_not_exist(self.plot_more_path)

     def make_plot_data_dir(self):
-        wm.mkdir_if_not_exist(self.plot_data_path)
+        UM2N.mkdir_if_not_exist(self.plot_data_path)
diff --git a/warpmesh/test/bench_helmholtz.py b/UM2N/test/bench_helmholtz.py
similarity index 100%
rename from warpmesh/test/bench_helmholtz.py
rename to UM2N/test/bench_helmholtz.py
diff --git a/warpmesh/test/bench_swirl.py b/UM2N/test/bench_swirl.py
similarity index 97%
rename from warpmesh/test/bench_swirl.py
rename to UM2N/test/bench_swirl.py
index 46964e7..b9f2566 100644
--- a/warpmesh/test/bench_swirl.py
+++ b/UM2N/test/bench_swirl.py
@@ -13,8 +13,8 @@
 import torch
 from torch_geometric.loader import DataLoader

-import warpmesh as wm
-from warpmesh.model.train_util import model_forward
+import UM2N
+from UM2N.model.train_util import model_forward


 def get_log_og(log_path, idx):
@@ -126,10 +126,10 @@ def __init__(
         self.y_0 = kwargs.pop("y_0", 0.25)

         # initital condition of u on coarse / fine mesh
-        u_init_exp = wm.get_u_0(
+        u_init_exp = UM2N.get_u_0(
             self.x, self.y, self.r_0, self.x_0, self.y_0, self.sigma
         )  # noqa
-        u_init_exp_fine = wm.get_u_0(
+        u_init_exp_fine = UM2N.get_u_0(
             self.x_fine, self.y_fine, self.r_0, self.x_0, self.y_0, self.sigma
         )  # noqa
         self.u_init = fd.Function(self.scalar_space).interpolate(u_init_exp)
@@ -265,7 +265,7 @@ def solve_u(self, t):
         Solve the PDE problem using RK (SSPRK) scheme on the coarse mesh
         store the solution field to a varaible: self.u_cur
         """
-        c_exp = wm.get_c(self.x, self.y, t, alpha=self.alpha)
+        c_exp = UM2N.get_c(self.x, self.y, t, alpha=self.alpha)
         c_temp = fd.Function(self.vector_space).interpolate(c_exp)
         self.c.project(c_temp)
@@ -283,7 +283,7 @@ def solve_u_fine(self, t):
         Solve the PDE problem using RK (SSPRK) scheme on the fine mesh
         store the solution field to a varaible: self.u_cur_fine
         """
-        c_exp = wm.get_c(self.x_fine, self.y_fine, t, alpha=self.alpha)
+        c_exp = UM2N.get_c(self.x_fine, self.y_fine, t, alpha=self.alpha)
         c_temp = fd.Function(self.vector_space_fine).interpolate(c_exp)
         self.c_fine.project(c_temp)
@@ -303,16 +303,16 @@ def project_u_(self):
         return

     def make_log_dir(self):
-        wm.mkdir_if_not_exist(self.log_path)
+        UM2N.mkdir_if_not_exist(self.log_path)

     def make_plot_dir(self):
-        wm.mkdir_if_not_exist(self.plot_path)
+        UM2N.mkdir_if_not_exist(self.plot_path)

     def make_plot_more_dir(self):
-        wm.mkdir_if_not_exist(self.plot_more_path)
+        UM2N.mkdir_if_not_exist(self.plot_more_path)

     def make_plot_data_dir(self):
-        wm.mkdir_if_not_exist(self.plot_data_path)
+        UM2N.mkdir_if_not_exist(self.plot_data_path)

     def eval_problem(self, model_name="model"):
         print("In eval problem")
@@ -484,7 +484,7 @@ def eval_problem(self, model_name="model"):
         self.uh_model = fd.Function(function_space_model).project(self.u_cur)  # noqa

         # check mesh integrity - Only perform evaluation on non-tangling mesh  # noqa
-        num_tangle = wm.get_sample_tangle(out, sample.x[:, :2], sample.face)  # noqa
+        num_tangle = UM2N.get_sample_tangle(out, sample.x[:, :2], sample.face)  # noqa
         if isinstance(num_tangle, torch.Tensor):
             num_tangle = num_tangle.item()
         if num_tangle > 0:  # has tangled elems:
@@ -500,7 +500,7 @@ def eval_problem(self, model_name="model"):
         # self.solve_u_fine(self.t)
         u_fine = fd.Function(function_space_fine).project(self.u_cur_fine)  # noqa

-        fig, plot_data_dict = wm.plot_compare(
+        fig, plot_data_dict = UM2N.plot_compare(
             self.mesh_fine,
             self.mesh_coarse,
             self.mesh_new,
@@ -574,7 +574,7 @@ def eval_problem(self, model_name="model"):
         df = pd.DataFrame(res, index=[0])
         df.to_csv(os.path.join(self.log_path, f"log_{idx:04d}.csv"))
         # plot compare mesh
-        plot_fig = wm.plot_mesh_compare_benchmark(
+        plot_fig = UM2N.plot_mesh_compare_benchmark(
             out.detach().cpu().numpy(),
             sample.y.detach().cpu().numpy(),
             sample.face.detach().cpu().numpy(),
diff --git a/warpmesh/test/burgers_bench.py b/UM2N/test/burgers_bench.py
similarity index 100%
rename from warpmesh/test/burgers_bench.py
rename to UM2N/test/burgers_bench.py
diff --git a/warpmesh/test/compare_hlmhltz.py b/UM2N/test/compare_hlmhltz.py
similarity index 97%
rename from warpmesh/test/compare_hlmhltz.py
rename to UM2N/test/compare_hlmhltz.py
index 5b670b3..7386830 100644
--- a/warpmesh/test/compare_hlmhltz.py
+++ b/UM2N/test/compare_hlmhltz.py
@@ -4,7 +4,7 @@
 import firedrake as fd
 import matplotlib.pyplot as plt  # noqa

-import warpmesh as wm
+import UM2N

 __all__ = ["compare_error"]
@@ -66,13 +66,13 @@ def func(x, y):
     # construct the helmholtz equation
     eq = None
     if problem_type == "helmholtz":
-        eq = wm.HelmholtzEqGenerator(
+        eq = UM2N.HelmholtzEqGenerator(
             params={
                 "u_exact_func": u_exact,
             }
         )
     elif problem_type == "poisson":
-        eq = wm.PoissonEqGenerator(
+        eq = UM2N.PoissonEqGenerator(
             params={
                 "u_exact_func": u_exact,
             }
         )
@@ -80,7 +80,7 @@ def func(x, y):
     # solution on og mesh
     og_res = eq.discretise(mesh)
-    og_solver = wm.EquationSolver(
+    og_solver = UM2N.EquationSolver(
         params={
             "function_space": og_res["function_space"],
             "LHS": og_res["LHS"],
@@ -93,7 +93,7 @@ def func(x, y):
     # solution on MA mesh
     mesh_MA.coordinates.dat.data[:] = data_in.y.detach().cpu().numpy()
     ma_res = eq.discretise(mesh_MA)
-    ma_solver = wm.EquationSolver(
+    ma_solver = UM2N.EquationSolver(
         params={
             "function_space": ma_res["function_space"],
             "LHS": ma_res["LHS"],
@@ -107,7 +107,7 @@ def func(x, y):
     uh_model = None
     if num_tangle == 0:
         model_res = eq.discretise(mesh_model)
-        model_solver = wm.EquationSolver(
+        model_solver = UM2N.EquationSolver(
             params={
                 "function_space": model_res["function_space"],
                 "LHS": model_res["LHS"],
@@ -125,7 +125,7 @@ def func(x, y):
     res_high_res = eq.discretise(high_res_mesh)
     uh_exact = fd.interpolate(res_high_res["u_exact"], high_res_function_space)

-    fig, plot_data_dict = wm.plot_compare(
+    fig, plot_data_dict = UM2N.plot_compare(
         mesh_fine,
         mesh,
         mesh_MA,
diff --git a/warpmesh/test/stat.py b/UM2N/test/stat.py
similarity index 100%
rename from warpmesh/test/stat.py
rename to UM2N/test/stat.py
diff --git a/warpmesh/test/tangle.py b/UM2N/test/tangle.py
similarity index 100%
rename from warpmesh/test/tangle.py
rename to UM2N/test/tangle.py
diff --git a/docs/conf.py b/docs/conf.py
deleted file mode 100644
index 2953023..0000000
--- a/docs/conf.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Author: Chunyang Wang
-# GitHub Username: acse-cw1722
-
-import os
-import sys
-
-sys.path.insert(0, os.path.abspath(os.sep.join((os.curdir, ".."))))
-sys.path.insert(0, os.path.abspath(os.sep.join((os.curdir, "."))))
-sys.path.insert(1, os.path.abspath(os.sep.join((os.curdir, "../warpmesh"))))
-sys.path.insert(1, os.path.abspath(os.sep.join((os.curdir, "./warpmesh"))))
-
-
-project = "warpmesh"
-author = "Chunyang Wang"
-release = "0.1"
-latex_elements = {
-    "preamble": "\\usepackage[utf8x]{inputenc}",
-}
-extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon", "sphinx.ext.mathjax"]
-source_suffix = ".rst"
-master_doc = "index"
-exclude_patterns = ["_build"]
-autodoc_mock_imports = ["torch", "firedrake", "numpy", "torch_geometric"]
-autoclass_content = "both"
diff --git a/docs/html/.buildinfo b/docs/html/.buildinfo
deleted file mode 100644
index d529c94..0000000
--- a/docs/html/.buildinfo
+++ /dev/null
@@ -1,4 +0,0 @@
-# Sphinx build info version 1
-# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 19f588fece5ad734a13cdcab484ce55f
-tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/docs/html/.doctrees/environment.pickle b/docs/html/.doctrees/environment.pickle
deleted file mode 100644
index f507fea..0000000
Binary files a/docs/html/.doctrees/environment.pickle and /dev/null differ
diff --git a/docs/html/.doctrees/index.doctree b/docs/html/.doctrees/index.doctree
deleted file mode 100644
index 7af0300..0000000
Binary files a/docs/html/.doctrees/index.doctree and /dev/null differ
diff --git a/docs/html/_sources/index.rst.txt b/docs/html/_sources/index.rst.txt
deleted file mode 100644
index cc4fb91..0000000
--- a/docs/html/_sources/index.rst.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-.. # Author: Chunyang Wang
-.. # GitHub Username: acse-cw1722
-
-WarpMesh: A Machin Learning Based Mesh Movement Package
-********************************************************
-
-WarpMesh package docs
-========================================================
-
-============
-.. toctree::
-   :maxdepth: 2
-   :caption: Contents:
-
-.. automodule:: generator
-   :members: MeshGenerator, HelmholtzSolver, RandomHelmholtzGenerator
-
-.. automodule:: processor
-   :members: MeshProcessor
-
-.. automodule:: model
-   :members: MRN, GlobalFeatExtractor, LocalFeatExtractor, RecurrentGATConv, train, evaluate, load_model, TangleCounter
-
-.. automodule:: loader
-   :members: MeshDataset, MeshData, normalise
\ No newline at end of file
diff --git a/docs/html/_static/alabaster.css b/docs/html/_static/alabaster.css
deleted file mode 100644
index 517d0b2..0000000
--- a/docs/html/_static/alabaster.css
+++ /dev/null
@@ -1,703 +0,0 @@
[... 703 deleted lines of generated Alabaster theme CSS omitted ...]
diff --git a/docs/html/_static/basic.css b/docs/html/_static/basic.css
deleted file mode 100644
index 30fee9d..0000000
--- a/docs/html/_static/basic.css
+++ /dev/null
@@ -1,925 +0,0 @@
[... 925 deleted lines of stock Sphinx basic theme CSS omitted ...]
diff --git a/docs/html/_static/custom.css b/docs/html/_static/custom.css
deleted file mode 100644
index 2a924f1..0000000
--- a/docs/html/_static/custom.css
+++ /dev/null
@@ -1 +0,0 @@
-/* This file intentionally left blank. */
diff --git a/docs/html/_static/doctools.js b/docs/html/_static/doctools.js
deleted file mode 100644
index d06a71d..0000000
--- a/docs/html/_static/doctools.js
+++ /dev/null
@@ -1,156 +0,0 @@
[... 156 deleted lines of stock Sphinx doctools.js omitted ...]
diff --git a/docs/html/_static/documentation_options.js b/docs/html/_static/documentation_options.js
deleted file mode 100644
index e21c068..0000000
--- a/docs/html/_static/documentation_options.js
+++ /dev/null
@@ -1,13 +0,0 @@
[... 13 deleted lines of generated documentation options (VERSION '0.1', LANGUAGE 'en', ...) omitted ...]
diff --git a/docs/html/_static/file.png b/docs/html/_static/file.png
deleted file mode 100644
index a858a41..0000000
Binary files a/docs/html/_static/file.png and /dev/null differ
diff --git a/docs/html/_static/language_data.js b/docs/html/_static/language_data.js
deleted file mode 100644
index 250f566..0000000
--- a/docs/html/_static/language_data.js
+++ /dev/null
@@ -1,199 +0,0 @@
[... 199 deleted lines of stock Sphinx language data (stopword list and Porter stemmer) omitted ...]
diff --git a/docs/html/_static/minus.png b/docs/html/_static/minus.png
deleted file mode 100644
index d96755f..0000000
Binary files a/docs/html/_static/minus.png and /dev/null differ
diff --git a/docs/html/_static/plus.png b/docs/html/_static/plus.png
deleted file mode 100644
index 7107cec..0000000
Binary files a/docs/html/_static/plus.png and /dev/null differ
diff --git a/docs/html/_static/pygments.css b/docs/html/_static/pygments.css
deleted file mode 100644
index 57c7df3..0000000
--- a/docs/html/_static/pygments.css
+++ /dev/null
@@ -1,84 +0,0 @@
[... 84 deleted lines of generated Pygments highlighting styles omitted ...]
Text.Whitespace */ -.highlight .mb { color: #990000 } /* Literal.Number.Bin */ -.highlight .mf { color: #990000 } /* Literal.Number.Float */ -.highlight .mh { color: #990000 } /* Literal.Number.Hex */ -.highlight .mi { color: #990000 } /* Literal.Number.Integer */ -.highlight .mo { color: #990000 } /* Literal.Number.Oct */ -.highlight .sa { color: #4e9a06 } /* Literal.String.Affix */ -.highlight .sb { color: #4e9a06 } /* Literal.String.Backtick */ -.highlight .sc { color: #4e9a06 } /* Literal.String.Char */ -.highlight .dl { color: #4e9a06 } /* Literal.String.Delimiter */ -.highlight .sd { color: #8f5902; font-style: italic } /* Literal.String.Doc */ -.highlight .s2 { color: #4e9a06 } /* Literal.String.Double */ -.highlight .se { color: #4e9a06 } /* Literal.String.Escape */ -.highlight .sh { color: #4e9a06 } /* Literal.String.Heredoc */ -.highlight .si { color: #4e9a06 } /* Literal.String.Interpol */ -.highlight .sx { color: #4e9a06 } /* Literal.String.Other */ -.highlight .sr { color: #4e9a06 } /* Literal.String.Regex */ -.highlight .s1 { color: #4e9a06 } /* Literal.String.Single */ -.highlight .ss { color: #4e9a06 } /* Literal.String.Symbol */ -.highlight .bp { color: #3465a4 } /* Name.Builtin.Pseudo */ -.highlight .fm { color: #000000 } /* Name.Function.Magic */ -.highlight .vc { color: #000000 } /* Name.Variable.Class */ -.highlight .vg { color: #000000 } /* Name.Variable.Global */ -.highlight .vi { color: #000000 } /* Name.Variable.Instance */ -.highlight .vm { color: #000000 } /* Name.Variable.Magic */ -.highlight .il { color: #990000 } /* Literal.Number.Integer.Long */ \ No newline at end of file diff --git a/docs/html/_static/searchtools.js b/docs/html/_static/searchtools.js deleted file mode 100644 index 7918c3f..0000000 --- a/docs/html/_static/searchtools.js +++ /dev/null @@ -1,574 +0,0 @@ -/* - * searchtools.js - * ~~~~~~~~~~~~~~~~ - * - * Sphinx JavaScript utilities for the full-text search. - * - * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ -"use strict"; - -/** - * Simple result scoring code. - */ -if (typeof Scorer === "undefined") { - var Scorer = { - // Implement the following function to further tweak the score for each result - // The function takes a result array [docname, title, anchor, descr, score, filename] - // and returns the new score. - /* - score: result => { - const [docname, title, anchor, descr, score, filename] = result - return score - }, - */ - - // query matches the full name of an object - objNameMatch: 11, - // or matches in the last dotted part of the object name - objPartialMatch: 6, - // Additive scores depending on the priority of the object - objPrio: { - 0: 15, // used to be importantResults - 1: 5, // used to be objectResults - 2: -5, // used to be unimportantResults - }, - // Used when the priority is not in the mapping. 
- objPrioDefault: 0, - - // query found in title - title: 15, - partialTitle: 7, - // query found in terms - term: 5, - partialTerm: 2, - }; -} - -const _removeChildren = (element) => { - while (element && element.lastChild) element.removeChild(element.lastChild); -}; - -/** - * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping - */ -const _escapeRegExp = (string) => - string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string - -const _displayItem = (item, searchTerms, highlightTerms) => { - const docBuilder = DOCUMENTATION_OPTIONS.BUILDER; - const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX; - const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX; - const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY; - const contentRoot = document.documentElement.dataset.content_root; - - const [docName, title, anchor, descr, score, _filename] = item; - - let listItem = document.createElement("li"); - let requestUrl; - let linkUrl; - if (docBuilder === "dirhtml") { - // dirhtml builder - let dirname = docName + "/"; - if (dirname.match(/\/index\/$/)) - dirname = dirname.substring(0, dirname.length - 6); - else if (dirname === "index/") dirname = ""; - requestUrl = contentRoot + dirname; - linkUrl = requestUrl; - } else { - // normal html builders - requestUrl = contentRoot + docName + docFileSuffix; - linkUrl = docName + docLinkSuffix; - } - let linkEl = listItem.appendChild(document.createElement("a")); - linkEl.href = linkUrl + anchor; - linkEl.dataset.score = score; - linkEl.innerHTML = title; - if (descr) { - listItem.appendChild(document.createElement("span")).innerHTML = - " (" + descr + ")"; - // highlight search terms in the description - if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js - highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); - } - else if (showSearchSummary) - fetch(requestUrl) - .then((responseData) => responseData.text()) - .then((data) => { - if (data) - listItem.appendChild( - Search.makeSearchSummary(data, searchTerms) - ); - // highlight search terms in the summary - if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js - highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); - }); - Search.output.appendChild(listItem); -}; -const _finishSearch = (resultCount) => { - Search.stopPulse(); - Search.title.innerText = _("Search Results"); - if (!resultCount) - Search.status.innerText = Documentation.gettext( - "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories." - ); - else - Search.status.innerText = _( - `Search finished, found ${resultCount} page(s) matching the search query.` - ); -}; -const _displayNextItem = ( - results, - resultCount, - searchTerms, - highlightTerms, -) => { - // results left, load the summary and display it - // this is intended to be dynamic (don't sub resultsCount) - if (results.length) { - _displayItem(results.pop(), searchTerms, highlightTerms); - setTimeout( - () => _displayNextItem(results, resultCount, searchTerms, highlightTerms), - 5 - ); - } - // search finished, update title and status message - else _finishSearch(resultCount); -}; - -/** - * Default splitQuery function. Can be overridden in ``sphinx.search`` with a - * custom function per language. 
- * - * The regular expression works by splitting the string on consecutive characters - * that are not Unicode letters, numbers, underscores, or emoji characters. - * This is the same as ``\W+`` in Python, preserving the surrogate pair area. - */ -if (typeof splitQuery === "undefined") { - var splitQuery = (query) => query - .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu) - .filter(term => term) // remove remaining empty strings -} - -/** - * Search Module - */ -const Search = { - _index: null, - _queued_query: null, - _pulse_status: -1, - - htmlToText: (htmlString) => { - const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html'); - htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() }); - const docContent = htmlElement.querySelector('[role="main"]'); - if (docContent !== undefined) return docContent.textContent; - console.warn( - "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Could you check your theme or template." - ); - return ""; - }, - - init: () => { - const query = new URLSearchParams(window.location.search).get("q"); - document - .querySelectorAll('input[name="q"]') - .forEach((el) => (el.value = query)); - if (query) Search.performSearch(query); - }, - - loadIndex: (url) => - (document.body.appendChild(document.createElement("script")).src = url), - - setIndex: (index) => { - Search._index = index; - if (Search._queued_query !== null) { - const query = Search._queued_query; - Search._queued_query = null; - Search.query(query); - } - }, - - hasIndex: () => Search._index !== null, - - deferQuery: (query) => (Search._queued_query = query), - - stopPulse: () => (Search._pulse_status = -1), - - startPulse: () => { - if (Search._pulse_status >= 0) return; - - const pulse = () => { - Search._pulse_status = (Search._pulse_status + 1) % 4; - Search.dots.innerText = ".".repeat(Search._pulse_status); - if (Search._pulse_status >= 0) window.setTimeout(pulse, 500); - }; - pulse(); - }, - - /** - * perform a search for something (or wait until index is loaded) - */ - performSearch: (query) => { - // create the required interface elements - const searchText = document.createElement("h2"); - searchText.textContent = _("Searching"); - const searchSummary = document.createElement("p"); - searchSummary.classList.add("search-summary"); - searchSummary.innerText = ""; - const searchList = document.createElement("ul"); - searchList.classList.add("search"); - - const out = document.getElementById("search-results"); - Search.title = out.appendChild(searchText); - Search.dots = Search.title.appendChild(document.createElement("span")); - Search.status = out.appendChild(searchSummary); - Search.output = out.appendChild(searchList); - - const searchProgress = document.getElementById("search-progress"); - // Some themes don't use the search progress node - if (searchProgress) { - searchProgress.innerText = _("Preparing search..."); - } - Search.startPulse(); - - // index already loaded, the browser was quick! 
- if (Search.hasIndex()) Search.query(query); - else Search.deferQuery(query); - }, - - /** - * execute search (requires search index to be loaded) - */ - query: (query) => { - const filenames = Search._index.filenames; - const docNames = Search._index.docnames; - const titles = Search._index.titles; - const allTitles = Search._index.alltitles; - const indexEntries = Search._index.indexentries; - - // stem the search terms and add them to the correct list - const stemmer = new Stemmer(); - const searchTerms = new Set(); - const excludedTerms = new Set(); - const highlightTerms = new Set(); - const objectTerms = new Set(splitQuery(query.toLowerCase().trim())); - splitQuery(query.trim()).forEach((queryTerm) => { - const queryTermLower = queryTerm.toLowerCase(); - - // maybe skip this "word" - // stopwords array is from language_data.js - if ( - stopwords.indexOf(queryTermLower) !== -1 || - queryTerm.match(/^\d+$/) - ) - return; - - // stem the word - let word = stemmer.stemWord(queryTermLower); - // select the correct list - if (word[0] === "-") excludedTerms.add(word.substr(1)); - else { - searchTerms.add(word); - highlightTerms.add(queryTermLower); - } - }); - - if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js - localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" ")) - } - - // console.debug("SEARCH: searching for:"); - // console.info("required: ", [...searchTerms]); - // console.info("excluded: ", [...excludedTerms]); - - // array of [docname, title, anchor, descr, score, filename] - let results = []; - _removeChildren(document.getElementById("search-progress")); - - const queryLower = query.toLowerCase(); - for (const [title, foundTitles] of Object.entries(allTitles)) { - if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) { - for (const [file, id] of foundTitles) { - let score = Math.round(100 * queryLower.length / title.length) - results.push([ - docNames[file], - titles[file] !== title ? `${titles[file]} > ${title}` : title, - id !== null ? "#" + id : "", - null, - score, - filenames[file], - ]); - } - } - } - - // search for explicit entries in index directives - for (const [entry, foundEntries] of Object.entries(indexEntries)) { - if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) { - for (const [file, id] of foundEntries) { - let score = Math.round(100 * queryLower.length / entry.length) - results.push([ - docNames[file], - titles[file], - id ? "#" + id : "", - null, - score, - filenames[file], - ]); - } - } - } - - // lookup as object - objectTerms.forEach((term) => - results.push(...Search.performObjectSearch(term, objectTerms)) - ); - - // lookup as search terms in fulltext - results.push(...Search.performTermsSearch(searchTerms, excludedTerms)); - - // let the scorer override scores with a custom scoring function - if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item))); - - // now sort the results by score (in opposite order of appearance, since the - // display function below uses pop() to retrieve items) and then - // alphabetically - results.sort((a, b) => { - const leftScore = a[4]; - const rightScore = b[4]; - if (leftScore === rightScore) { - // same score: sort alphabetically - const leftTitle = a[1].toLowerCase(); - const rightTitle = b[1].toLowerCase(); - if (leftTitle === rightTitle) return 0; - return leftTitle > rightTitle ? -1 : 1; // inverted is intentional - } - return leftScore > rightScore ? 
1 : -1; - }); - - // remove duplicate search results - // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept - let seen = new Set(); - results = results.reverse().reduce((acc, result) => { - let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(','); - if (!seen.has(resultStr)) { - acc.push(result); - seen.add(resultStr); - } - return acc; - }, []); - - results = results.reverse(); - - // for debugging - //Search.lastresults = results.slice(); // a copy - // console.info("search results:", Search.lastresults); - - // print the results - _displayNextItem(results, results.length, searchTerms, highlightTerms); - }, - - /** - * search for object names - */ - performObjectSearch: (object, objectTerms) => { - const filenames = Search._index.filenames; - const docNames = Search._index.docnames; - const objects = Search._index.objects; - const objNames = Search._index.objnames; - const titles = Search._index.titles; - - const results = []; - - const objectSearchCallback = (prefix, match) => { - const name = match[4] - const fullname = (prefix ? prefix + "." : "") + name; - const fullnameLower = fullname.toLowerCase(); - if (fullnameLower.indexOf(object) < 0) return; - - let score = 0; - const parts = fullnameLower.split("."); - - // check for different match types: exact matches of full name or - // "last name" (i.e. last dotted part) - if (fullnameLower === object || parts.slice(-1)[0] === object) - score += Scorer.objNameMatch; - else if (parts.slice(-1)[0].indexOf(object) > -1) - score += Scorer.objPartialMatch; // matches in last name - - const objName = objNames[match[1]][2]; - const title = titles[match[0]]; - - // If more than one term searched for, we require other words to be - // found in the name/title/description - const otherTerms = new Set(objectTerms); - otherTerms.delete(object); - if (otherTerms.size > 0) { - const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase(); - if ( - [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0) - ) - return; - } - - let anchor = match[3]; - if (anchor === "") anchor = fullname; - else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname; - - const descr = objName + _(", in ") + title; - - // add custom score for some objects according to scorer - if (Scorer.objPrio.hasOwnProperty(match[2])) - score += Scorer.objPrio[match[2]]; - else score += Scorer.objPrioDefault; - - results.push([ - docNames[match[0]], - fullname, - "#" + anchor, - descr, - score, - filenames[match[0]], - ]); - }; - Object.keys(objects).forEach((prefix) => - objects[prefix].forEach((array) => - objectSearchCallback(prefix, array) - ) - ); - return results; - }, - - /** - * search for full-text terms in the index - */ - performTermsSearch: (searchTerms, excludedTerms) => { - // prepare search - const terms = Search._index.terms; - const titleTerms = Search._index.titleterms; - const filenames = Search._index.filenames; - const docNames = Search._index.docnames; - const titles = Search._index.titles; - - const scoreMap = new Map(); - const fileMap = new Map(); - - // perform the search on the required terms - searchTerms.forEach((word) => { - const files = []; - const arr = [ - { files: terms[word], score: Scorer.term }, - { files: titleTerms[word], score: Scorer.title }, - ]; - // add support for partial matches - if (word.length > 2) { - const escapedWord = _escapeRegExp(word); - Object.keys(terms).forEach((term) => { - if (term.match(escapedWord) && 
!terms[word]) - arr.push({ files: terms[term], score: Scorer.partialTerm }); - }); - Object.keys(titleTerms).forEach((term) => { - if (term.match(escapedWord) && !titleTerms[word]) - arr.push({ files: titleTerms[word], score: Scorer.partialTitle }); - }); - } - - // no match but word was a required one - if (arr.every((record) => record.files === undefined)) return; - - // found search word in contents - arr.forEach((record) => { - if (record.files === undefined) return; - - let recordFiles = record.files; - if (recordFiles.length === undefined) recordFiles = [recordFiles]; - files.push(...recordFiles); - - // set score for the word in each file - recordFiles.forEach((file) => { - if (!scoreMap.has(file)) scoreMap.set(file, {}); - scoreMap.get(file)[word] = record.score; - }); - }); - - // create the mapping - files.forEach((file) => { - if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1) - fileMap.get(file).push(word); - else fileMap.set(file, [word]); - }); - }); - - // now check if the files don't contain excluded terms - const results = []; - for (const [file, wordList] of fileMap) { - // check if all requirements are matched - - // as search terms with length < 3 are discarded - const filteredTermCount = [...searchTerms].filter( - (term) => term.length > 2 - ).length; - if ( - wordList.length !== searchTerms.size && - wordList.length !== filteredTermCount - ) - continue; - - // ensure that none of the excluded terms is in the search result - if ( - [...excludedTerms].some( - (term) => - terms[term] === file || - titleTerms[term] === file || - (terms[term] || []).includes(file) || - (titleTerms[term] || []).includes(file) - ) - ) - break; - - // select one (max) score for the file. - const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w])); - // add result to the result list - results.push([ - docNames[file], - titles[file], - "", - null, - score, - filenames[file], - ]); - } - return results; - }, - - /** - * helper function to return a node containing the - * search summary for a given text. keywords is a list - * of stemmed words. - */ - makeSearchSummary: (htmlText, keywords) => { - const text = Search.htmlToText(htmlText); - if (text === "") return null; - - const textLower = text.toLowerCase(); - const actualStartPosition = [...keywords] - .map((k) => textLower.indexOf(k.toLowerCase())) - .filter((i) => i > -1) - .slice(-1)[0]; - const startWithContext = Math.max(actualStartPosition - 120, 0); - - const top = startWithContext === 0 ? "" : "..."; - const tail = startWithContext + 240 < text.length ? "..." : ""; - - let summary = document.createElement("p"); - summary.classList.add("context"); - summary.textContent = top + text.substr(startWithContext, 240).trim() + tail; - - return summary; - }, -}; - -_ready(Search.init); diff --git a/docs/html/_static/sphinx_highlight.js b/docs/html/_static/sphinx_highlight.js deleted file mode 100644 index 8a96c69..0000000 --- a/docs/html/_static/sphinx_highlight.js +++ /dev/null @@ -1,154 +0,0 @@ -/* Highlighting utilities for Sphinx HTML documentation. */ -"use strict"; - -const SPHINX_HIGHLIGHT_ENABLED = true - -/** - * highlight a given string on a node by wrapping it in - * span elements with the given class name. 
- */ -const _highlight = (node, addItems, text, className) => { - if (node.nodeType === Node.TEXT_NODE) { - const val = node.nodeValue; - const parent = node.parentNode; - const pos = val.toLowerCase().indexOf(text); - if ( - pos >= 0 && - !parent.classList.contains(className) && - !parent.classList.contains("nohighlight") - ) { - let span; - - const closestNode = parent.closest("body, svg, foreignObject"); - const isInSVG = closestNode && closestNode.matches("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.classList.add(className); - } - - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - const rest = document.createTextNode(val.substr(pos + text.length)); - parent.insertBefore( - span, - parent.insertBefore( - rest, - node.nextSibling - ) - ); - node.nodeValue = val.substr(0, pos); - /* There may be more occurrences of search term in this node. So call this - * function recursively on the remaining fragment. - */ - _highlight(rest, addItems, text, className); - - if (isInSVG) { - const rect = document.createElementNS( - "http://www.w3.org/2000/svg", - "rect" - ); - const bbox = parent.getBBox(); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute("class", className); - addItems.push({ parent: parent, target: rect }); - } - } - } else if (node.matches && !node.matches("button, select, textarea")) { - node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); - } -}; -const _highlightText = (thisNode, text, className) => { - let addItems = []; - _highlight(thisNode, addItems, text, className); - addItems.forEach((obj) => - obj.parent.insertAdjacentElement("beforebegin", obj.target) - ); -}; - -/** - * Small JavaScript module for the documentation. - */ -const SphinxHighlight = { - - /** - * highlight the search words provided in localstorage in the text - */ - highlightSearchWords: () => { - if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight - - // get and clear terms from localstorage - const url = new URL(window.location); - const highlight = - localStorage.getItem("sphinx_highlight_terms") - || url.searchParams.get("highlight") - || ""; - localStorage.removeItem("sphinx_highlight_terms") - url.searchParams.delete("highlight"); - window.history.replaceState({}, "", url); - - // get individual terms from highlight string - const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); - if (terms.length === 0) return; // nothing to do - - // There should never be more than one element matching "div.body" - const divBody = document.querySelectorAll("div.body"); - const body = divBody.length ? 
divBody[0] : document.querySelector("body"); - window.setTimeout(() => { - terms.forEach((term) => _highlightText(body, term, "highlighted")); - }, 10); - - const searchBox = document.getElementById("searchbox"); - if (searchBox === null) return; - searchBox.appendChild( - document - .createRange() - .createContextualFragment( - '" - ) - ); - }, - - /** - * helper function to hide the search marks again - */ - hideSearchWords: () => { - document - .querySelectorAll("#searchbox .highlight-link") - .forEach((el) => el.remove()); - document - .querySelectorAll("span.highlighted") - .forEach((el) => el.classList.remove("highlighted")); - localStorage.removeItem("sphinx_highlight_terms") - }, - - initEscapeListener: () => { - // only install a listener if it is really needed - if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return; - - document.addEventListener("keydown", (event) => { - // bail for input elements - if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; - // bail with special keys - if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return; - if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) { - SphinxHighlight.hideSearchWords(); - event.preventDefault(); - } - }); - }, -}; - -_ready(() => { - /* Do not call highlightSearchWords() when we are on the search page. - * It will highlight words from the *previous* search query. - */ - if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords(); - SphinxHighlight.initEscapeListener(); -}); diff --git a/docs/html/genindex.html b/docs/html/genindex.html deleted file mode 100644 index 172229c..0000000 --- a/docs/html/genindex.html +++ /dev/null @@ -1,240 +0,0 @@ - - - - - - - Index — warpmesh 0.1 documentation - - - - - - - - - - - - - - - - -
-[genindex.html body: Sphinx page chrome only; no index entries recoverable]
\ No newline at end of file
diff --git a/docs/html/index.html b/docs/html/index.html
deleted file mode 100644
index a52d00a..0000000
--- a/docs/html/index.html
+++ /dev/null
@@ -1,333 +0,0 @@
-WarpMesh: A Machin Learning Based Mesh Movement Package — warpmesh 0.1 documentation
-WarpMesh: A Machin Learning Based Mesh Movement Package
-
-WarpMesh package docs
-
-Contents:
-
-class processor.MeshProcessor(original_mesh, optimal_mesh, function_space, use_4_edge=True, feature={'grad_uh': None, 'uh': None}, raw_feature={'hessian_norm': None, 'uh': None}, dist_params={'n_dist': None, 'use_iso': None, 'w': None, 'z': None, 'μ_x': None, 'μ_y': None, 'σ_x': None, 'σ_y': None})
-
-    MeshProcessor class for pre-processing mesh data, attaching features to nodes, and converting them to training data.
-
-    Parameters:
-    - original_mesh: The initial mesh.
-    - optimal_mesh: The optimal mesh after adaptation.
-    - function_space: The function space over which the mesh is defined.
-    - use_4_edge: Whether to use four edges for finding boundaries.
-    - feature: Dictionary containing features like 'uh', 'grad_uh' etc.
-    - raw_feature: Dictionary containing raw features like 'uh', 'hessian_norm' etc.
-    - dist_params: Dictionary containing distribution parameters.
-
-    Attributes:
-    - dist_params: Distribution parameters.
-    - mesh: The original mesh.
-    - optimal_mesh: The optimal mesh.
-    - function_space: The function space.
-    - feature: The attached features.
-    - raw_feature: The raw features.
-    - coordinates: The coordinates of the original mesh.
-    - optimal_coordinates: The coordinates of the optimal mesh.
-    - cell_node_list: The list of nodes for each cell.
-    - num_nodes: The number of nodes in each cell.
-
-    attach_feature()
-        Attach features to nodes of the mesh. The features to be attached are specified in the 'feature' attribute.
-
-    find_bd()
-        Identify the boundary nodes of the mesh and update various boundary masks.
-
-    find_edges()
-        Find the edges of the mesh and update the 'edges' attribute.
-
-    get_conv_feat(fix_reso_x=20, fix_reso_y=20)
-        Generate features for convolution. This involves grid spacing and other related features.
-
-    save_taining_data(path)
-        Save the processed data to disk for future use.
-        Parameters:
-        - path: The directory where to save the data.
-
-    to_train_data()
-        Convert mesh and associated features to PyTorch Geometric Data format. This can be used directly for machine learning training.
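The `MeshProcessor` workflow documented above corresponds to the data-generation scripts further down in this diff. Below is a minimal sketch of how the pieces fit together; it is not a verbatim excerpt, and it assumes `mesh`, `new_mesh`, `function_space`, `uh`, `grad_uh`, `hessian_norm` and `dist_params` have already been produced, e.g. as in `script/build_helmholtz_square.py`:

```python
# Minimal sketch, assuming the inputs (mesh, new_mesh, uh, ...) come from a
# generation script such as script/build_helmholtz_square.py.
import UM2N

mesh_processor = UM2N.MeshProcessor(
    original_mesh=mesh,             # the initial mesh
    optimal_mesh=new_mesh,          # the mesh after adaptation
    function_space=function_space,  # function space the solution lives in
    use_4_edge=True,                # use four edges to find boundaries
    feature={"uh": uh, "grad_uh": grad_uh},
    raw_feature={"uh": uh, "hessian_norm": hessian_norm},
    dist_params=dist_params,        # distribution parameters of the source
)
mesh_processor.attach_feature()              # attach features to mesh nodes
data = mesh_processor.to_train_data()        # PyTorch Geometric Data object
mesh_processor.save_taining_data("data/sample_0")  # spelling as in the source
```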
-class loader.MeshData(*args: Any, **kwargs: Any)
-
-    Custom PyTorch Data object designed to handle mesh data features.
-    This class is intended to be used as the base class of data samples returned by the MeshDataset.
-
-class loader.MeshDataset(*args: Any, **kwargs: Any)
-
-    Dataset for mesh-based data.
-
-    Attributes:
-    - x_feature (list): List of feature names for node features.
-    - mesh_feature (list): List of feature names for mesh features.
-    - conv_feature (list): List of feature names for convolution features.
-    - file_names (list): List of filenames containing mesh data.
-
-    get_conv_feature(data)
-        Extracts and concatenates the conv_features from the data.
-        Parameters: data (dict) – the data dictionary loaded from a .npy file.
-        Returns (tensor): the concatenated conv_features.
-
-    get_conv_feature_fix(data)
-        Extracts and concatenates the conv_features from the data.
-        Parameters: data (dict) – the data dictionary loaded from a .npy file.
-        Returns (tensor): the concatenated conv_features.
-
-    get_mesh_feature(data)
-        Extracts and concatenates the mesh_features from the data.
-        Parameters: data (dict) – the data dictionary loaded from a .npy file.
-        Returns (tensor): the concatenated mesh_features.
-
-    get_x_feature(data)
-        Extracts and concatenates the x_features for each node from the data.
-        Parameters: data (dict) – the data dictionary loaded from a .npy file.
-        Returns (tensor): the concatenated x_features for each node.
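A short sketch of constructing and indexing the dataset documented above. The positional directory argument is an assumption inferred from the deleted search index (which lists a `file_dir` parameter) and is not confirmed elsewhere in this diff:

```python
# Hypothetical usage; the directory layout and file_dir argument are assumptions.
from UM2N.loader import MeshDataset

dataset = MeshDataset("data/helmholtz_square/train")  # folder of .npy samples
sample = dataset[0]  # a MeshData object assembled from x/mesh/conv features
```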
-loader.normalise(data)
-
-    Normalizes the mesh and convolution features of a given MeshData object.
-    Parameters: data (MeshData) – the MeshData object containing features to normalize.
-    Returns (MeshData): the MeshData object with normalized features.
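On the training side, `normalise` is imported alongside `MeshDataset` in `run_train.py` (see that diff below). A hedged sketch of the loading pipeline; only the import paths are confirmed by this diff, and passing `normalise` as a `transform` hook is an assumption:

```python
# Sketch of the training-side loading pipeline; the transform keyword and
# directory path are assumptions, the imports match the run_train.py diff.
from torch_geometric.data import DataLoader

from UM2N.loader import MeshDataset, normalise

dataset = MeshDataset("data/helmholtz_square/train", transform=normalise)
loader = DataLoader(dataset, batch_size=16, shuffle=True)
for batch in loader:
    pass  # feed each normalised batch to a model such as UM2N.MRN
```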
- - - - - - - \ No newline at end of file diff --git a/docs/html/objects.inv b/docs/html/objects.inv deleted file mode 100644 index c942630..0000000 Binary files a/docs/html/objects.inv and /dev/null differ diff --git a/docs/html/py-modindex.html b/docs/html/py-modindex.html deleted file mode 100644 index 968778d..0000000 --- a/docs/html/py-modindex.html +++ /dev/null @@ -1,125 +0,0 @@ - - - - - - - Python Module Index — warpmesh 0.1 documentation - - - - - - - - - - - - - - - - - - - - - -
-Python Module Index
-
-   l: loader
-   p: processor
- - - - - - - \ No newline at end of file diff --git a/docs/html/search.html b/docs/html/search.html deleted file mode 100644 index 42089c4..0000000 --- a/docs/html/search.html +++ /dev/null @@ -1,117 +0,0 @@ - - - - - - - Search — warpmesh 0.1 documentation - - - - - - - - - - - - - - - - - - - - - - -
-Search
-
-Searching for multiple words only shows matches that contain all words.
- - - - - - - \ No newline at end of file diff --git a/docs/html/searchindex.js b/docs/html/searchindex.js deleted file mode 100644 index 3ada045..0000000 --- a/docs/html/searchindex.js +++ /dev/null @@ -1 +0,0 @@ -Search.setIndex({"docnames": ["index"], "filenames": ["index.rst"], "titles": ["WarpMesh: A Machin Learning Based Mesh Movement Package"], "terms": {"caption": 0, "content": 0, "class": 0, "gener": 0, "meshgener": [], "param": [], "helmholtz_eq": [], "none": 0, "num_grid_x": [], "num_grid_i": [], "respons": [], "move": [], "given": 0, "helmholtz": [], "equat": [], "thi": 0, "method": [], "i": 0, "pyroteu": [], "attribut": 0, "eq": [], "The": 0, "object": 0, "number": 0, "grid": 0, "point": [], "x": [], "dimens": [], "y": [], "initi": 0, "get_hessian": [], "comput": [], "return": 0, "hessian": [], "paramet": 0, "which": 0, "project": [], "function": 0, "space": 0, "get_jacobian": [], "jacobian": [], "get_jacobian_det": [], "determin": [], "get_monitor_v": [], "monitor": [], "valu": [], "us": 0, "monitor_func": [], "move_mesh": [], "mong": [], "amper": [], "store": [], "its": [], "randomhelmholtzgener": [], "simple_u": [], "fals": [], "dist_param": 0, "max_dist": [], "10": [], "n_dist": 0, "w_max": [], "0": [], "2": [], "w_min": [], "05": [], "x_end": [], "1": [], "x_start": [], "y_end": [], "y_start": [], "z_max": [], "z_min": [], "random": [], "gaussian": [], "distribut": 0, "flag": [], "simpler": [], "form": [], "u": [], "type": 0, "bool": [], "dict": 0, "u_exact": [], "analyt": [], "solut": [], "f": [], "simul": [], "sourc": [], "function_spac": 0, "problem": [], "lh": [], "left": [], "hand": [], "side": [], "rh": [], "right": [], "bc": [], "dirichlet": [], "boundari": 0, "condit": [], "isotrip": [], "dataset": 0, "true": 0, "default": [], "discretis": [], "firedrak": [], "unitsquaremesh": [], "discret": [], "construct": [], "dictionari": 0, "get_dist_param": [], "contain": 0, "set_dist_param": [], "set": [], "from": 0, "processor": 0, "meshprocessor": 0, "original_mesh": 0, "optimal_mesh": 0, "use_4_edg": 0, "featur": 0, "grad_uh": 0, "uh": 0, "raw_featur": 0, "hessian_norm": 0, "w": 0, "z": 0, "\u03bc_x": 0, "\u03bc_y": 0, "\u03c3_x": 0, "\u03c3_y": 0, "pre": 0, "process": 0, "data": 0, "attach": 0, "node": 0, "convert": 0, "them": 0, "train": 0, "optim": 0, "after": 0, "adapt": 0, "over": 0, "defin": 0, "whether": 0, "four": 0, "edg": 0, "find": 0, "like": 0, "etc": 0, "raw": 0, "origin": 0, "coordin": 0, "optimal_coordin": 0, "cell_node_list": 0, "list": 0, "each": 0, "cell": 0, "num_nod": 0, "attach_featur": 0, "ar": 0, "specifi": 0, "find_bd": 0, "identifi": 0, "updat": 0, "variou": 0, "mask": 0, "find_edg": 0, "get_conv_feat": 0, "convolut": 0, "involv": 0, "other": 0, "relat": 0, "save_taining_data": 0, "path": 0, "save": 0, "disk": 0, "futur": 0, "directori": 0, "where": 0, "to_train_data": 0, "associ": 0, "pytorch": 0, "geometr": 0, "format": 0, "can": 0, "directli": 0, "model": [], "globalfeatextractor": [], "arg": 0, "ani": 0, "kwarg": 0, "custom": 0, "layer": [], "global": [], "extract": 0, "emploi": [], "multipl": [], "dropout": [], "conv1": [], "conv2": [], "conv3": [], "conv4": [], "torch": [], "nn": [], "conv2d": [], "final_pool": [], "final": [], "pool": [], "adaptiveavgpool2d": [], "forward": [], "pass": [], "tensor": 0, "input": [], "localfeatextractor": [], "perform": [], "local": [], "graph": [], "structur": [], "extend": [], "torch_geometr": [], "messagepass": [], "addit": [], "aggreg": [], "messag": [], "scheme": [], "lin_1": [], "first": [], "linear": 
[], "lin_2": [], "second": [], "lin_3": [], "third": [], "activ": [], "selu": [], "num_feat": [], "int": [], "per": [], "out": [], "output": [], "edge_index": [], "indic": [], "mrn": [], "refin": [], "network": [], "implement": [], "recurr": [], "deform": [], "num_loop": [], "loop": [], "gfe_out_c": [], "channel": [], "extractor": [], "lfe_out_c": [], "hidden_s": [], "size": [], "hidden": [], "gfe": [], "lfe": [], "lin": [], "transform": [], "gat": [], "block": [], "recurrentgatconv": [], "gfe_in_c": [], "lfe_in_c": [], "deform_in_c": [], "info": [], "coord": [], "num_step": [], "accord": [], "step": [], "attent": [], "to_hidden": [], "gatv2conv": [], "to_coord": [], "sequenti": [], "tanglecount": [], "count": [], "tangl": [], "evalu": [], "loader": 0, "devic": [], "loss_func": [], "use_jacob": [], "loss": [], "dataload": [], "modul": [], "run": [], "callabl": [], "e": [], "g": [], "mse": [], "cross": [], "entropi": [], "averag": [], "across": [], "all": [], "batch": [], "float": [], "load_model": [], "weight_path": [], "load": 0, "weight": [], "file": 0, "str": [], "adam": [], "sgd": [], "meshdata": 0, "edge_attr": [], "po": [], "design": 0, "handl": 0, "intend": 0, "sampl": 0, "meshdataset": 0, "file_dir": [], "target_transform": [], "x_featur": 0, "bd_mask": [], "bd_left_mask": [], "bd_right_mask": [], "bd_down_mask": [], "bd_up_mask": [], "mesh_featur": 0, "conv_featur": 0, "conv_uh": [], "load_analyt": [], "load_jacobian": [], "name": 0, "file_nam": 0, "filenam": 0, "get_conv_featur": 0, "concaten": 0, "npy": 0, "get_mesh_featur": 0, "get_x_featur": 0, "normalis": 0, "normal": 0, "use_iso": 0, "fix_reso_x": 0, "20": 0, "fix_reso_i": 0, "deprec": [], "do": [], "option": [], "unless": [], "you": [], "know": [], "what": [], "use_inversion_loss": [], "scaler": [], "100": [], "conv_feature_fix": [], "conv_uh_fix": [], "get_conv_feature_fix": 0, "use_inversion_diff_loss": [], "use_area_loss": [], "use_clust": [], "r": [], "25": []}, "objects": {"": [[0, 0, 0, "-", "loader"], [0, 0, 0, "-", "processor"]], "loader": [[0, 1, 1, "", "MeshData"], [0, 1, 1, "", "MeshDataset"], [0, 4, 1, "", "normalise"]], "loader.MeshDataset": [[0, 2, 1, "", "conv_feature"], [0, 2, 1, "", "file_names"], [0, 3, 1, "", "get_conv_feature"], [0, 3, 1, "", "get_conv_feature_fix"], [0, 3, 1, "", "get_mesh_feature"], [0, 3, 1, "", "get_x_feature"], [0, 2, 1, "", "mesh_feature"], [0, 2, 1, "", "x_feature"]], "processor": [[0, 1, 1, "", "MeshProcessor"]], "processor.MeshProcessor": [[0, 3, 1, "", "attach_feature"], [0, 3, 1, "", "find_bd"], [0, 3, 1, "", "find_edges"], [0, 3, 1, "", "get_conv_feat"], [0, 3, 1, "", "save_taining_data"], [0, 3, 1, "", "to_train_data"]]}, "objtypes": {"0": "py:module", "1": "py:class", "2": "py:attribute", "3": "py:method", "4": "py:function"}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "class", "Python class"], "2": ["py", "attribute", "Python attribute"], "3": ["py", "method", "Python method"], "4": ["py", "function", "Python function"]}, "titleterms": {"warpmesh": 0, "A": 0, "machin": 0, "learn": 0, "base": 0, "mesh": 0, "movement": 0, "packag": 0, "doc": 0}, "envversion": {"sphinx.domains.c": 3, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 60}, "alltitles": {"WarpMesh: A Machin Learning Based Mesh Movement Package": [[0, 
"warpmesh-a-machin-learning-based-mesh-movement-package"]], "WarpMesh package docs": [[0, "warpmesh-package-docs"]]}, "indexentries": {"meshdata (class in loader)": [[0, "loader.MeshData"]], "meshdataset (class in loader)": [[0, "loader.MeshDataset"]], "meshprocessor (class in processor)": [[0, "processor.MeshProcessor"]], "attach_feature() (processor.meshprocessor method)": [[0, "processor.MeshProcessor.attach_feature"]], "conv_feature (loader.meshdataset attribute)": [[0, "loader.MeshDataset.conv_feature"]], "file_names (loader.meshdataset attribute)": [[0, "loader.MeshDataset.file_names"]], "find_bd() (processor.meshprocessor method)": [[0, "processor.MeshProcessor.find_bd"]], "find_edges() (processor.meshprocessor method)": [[0, "processor.MeshProcessor.find_edges"]], "get_conv_feat() (processor.meshprocessor method)": [[0, "processor.MeshProcessor.get_conv_feat"]], "get_conv_feature() (loader.meshdataset method)": [[0, "loader.MeshDataset.get_conv_feature"]], "get_conv_feature_fix() (loader.meshdataset method)": [[0, "loader.MeshDataset.get_conv_feature_fix"]], "get_mesh_feature() (loader.meshdataset method)": [[0, "loader.MeshDataset.get_mesh_feature"]], "get_x_feature() (loader.meshdataset method)": [[0, "loader.MeshDataset.get_x_feature"]], "loader": [[0, "module-loader"]], "mesh_feature (loader.meshdataset attribute)": [[0, "loader.MeshDataset.mesh_feature"]], "module": [[0, "module-loader"], [0, "module-processor"]], "normalise() (in module loader)": [[0, "loader.normalise"]], "processor": [[0, "module-processor"]], "save_taining_data() (processor.meshprocessor method)": [[0, "processor.MeshProcessor.save_taining_data"]], "to_train_data() (processor.meshprocessor method)": [[0, "processor.MeshProcessor.to_train_data"]], "x_feature (loader.meshdataset attribute)": [[0, "loader.MeshDataset.x_feature"]]}}) \ No newline at end of file diff --git a/docs/index.rst b/docs/index.rst deleted file mode 100644 index cc4fb91..0000000 --- a/docs/index.rst +++ /dev/null @@ -1,25 +0,0 @@ -.. # Author: Chunyang Wang -.. # GitHub Username: acse-cw1722 - -WarpMesh: A Machin Learning Based Mesh Movement Package -******************************************************** - -WarpMesh package docs -======================================================== - -============ -.. toctree:: - :maxdepth: 2 - :caption: Contents: - -.. automodule:: generator - :members: MeshGenerator, HelmholtzSolver, RandomHelmholtzGenerator - -.. automodule:: processor - :members: MeshProcessor - -.. automodule:: model - :members: MRN, GlobalFeatExtractor, LocalFeatExtractor, RecurrentGATConv, train, evaluate, load_model, TangleCounter - -.. 
automodule:: loader - :members: MeshDataset, MeshData, normalise \ No newline at end of file diff --git a/inference_utils.py b/inference_utils.py index 01e0186..cbfad6a 100644 --- a/inference_utils.py +++ b/inference_utils.py @@ -5,7 +5,7 @@ import torch from firedrake.cython.dmcommon import facet_closure_nodes -import warpmesh as wm +import UM2N device = torch.device("cuda" if torch.cuda.is_available() else "cpu") @@ -227,20 +227,20 @@ def load_model(run, config, epoch, experiment_dir): assert model_file is not None, "Model file not found" model = None if config.model_used == "M2N": - model = wm.M2N( + model = UM2N.M2N( gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, deform_in_c=config.num_deform_in, ) elif config.model_used == "MRN": - model = wm.MRN( + model = UM2N.MRN( gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, deform_in_c=config.num_deform_in, num_loop=config.num_deformer_loop, ) elif config.model_used == "MRT" or config.model_used == "MRTransformer": - model = wm.MRTransformer( + model = UM2N.MRTransformer( num_transformer_in=config.num_transformer_in, num_transformer_out=config.num_transformer_out, num_transformer_embed_dim=config.num_transformer_embed_dim, @@ -255,7 +255,7 @@ def load_model(run, config, epoch, experiment_dir): device=device, ) elif config.model_used == "M2T": - model = wm.M2T( + model = UM2N.M2T( num_transformer_in=config.num_transformer_in, num_transformer_out=config.num_transformer_out, num_transformer_embed_dim=config.num_transformer_embed_dim, @@ -271,7 +271,7 @@ def load_model(run, config, epoch, experiment_dir): device=device, ) elif config.model_used == "M2N_T": - model = wm.M2N_T( + model = UM2N.M2N_T( deform_in_c=config.num_deform_in, gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, @@ -279,5 +279,5 @@ def load_model(run, config, epoch, experiment_dir): else: print("Model not found") model_file_path = os.path.join(experiment_dir, target_file_name) - model = wm.load_model(model, model_file_path) + model = UM2N.load_model(model, model_file_path) return model diff --git a/install.sh b/install.sh index d7bc346..3b4b6f5 100755 --- a/install.sh +++ b/install.sh @@ -48,5 +48,5 @@ green_log "Downloading Movement..." git clone https://github.com/mesh-adaptation/movement.git ${VIRTUAL_ENV}/src/movement pip install -e ${VIRTUAL_ENV}/src/movement -# Install WarpMesh +# Install UM2N pip install -e ${UM2N_ROOT} diff --git a/install_gpu.sh b/install_gpu.sh index a8ce607..ad4b84c 100755 --- a/install_gpu.sh +++ b/install_gpu.sh @@ -58,5 +58,5 @@ green_log "Downloading Movement..." 
git clone https://github.com/mesh-adaptation/movement.git ${VIRTUAL_ENV}/src/movement pip install -e ${VIRTUAL_ENV}/src/movement -# Install WarpMesh +# Install UM2N pip install -e ${UM2N_ROOT} diff --git a/pyproject.toml b/pyproject.toml index 3333995..8171dd1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ requires = ["setuptools"] [project] -name = "warpmesh" +name = "UM2N" version = "1.0" dependencies = [ "einops", @@ -43,7 +43,7 @@ Homepage = "https://erizmr.github.io/UM2N" Repository = "https://github.com/erizmr/UM2N" [tool.setuptools] -packages = ["warpmesh"] +packages = ["UM2N"] [tool.ruff] line-length = 88 diff --git a/run_train.py b/run_train.py index a88874f..8bdbc3c 100644 --- a/run_train.py +++ b/run_train.py @@ -16,9 +16,9 @@ import wandb from torch_geometric.data import DataLoader -from warpmesh.helper import load_yaml_to_namespace, save_namespace_to_yaml -from warpmesh.loader import AggreateDataset, MeshDataset, normalise -from warpmesh.model import ( +from UM2N.helper import load_yaml_to_namespace, save_namespace_to_yaml +from UM2N.loader import AggreateDataset, MeshDataset, normalise +from UM2N.model import ( M2N, MRN, M2N_dynamic_drop, @@ -41,7 +41,7 @@ parser = argparse.ArgumentParser( - prog="Warpmesh", description="warp the mesh", epilog="warp the mesh" + prog="UM2N", description="warp the mesh", epilog="warp the mesh" ) parser.add_argument("-config", default="", type=str, required=True) args = parser.parse_args() diff --git a/run_train_baselines.py b/run_train_baselines.py index 7a4e463..1b214b4 100644 --- a/run_train_baselines.py +++ b/run_train_baselines.py @@ -15,9 +15,9 @@ import wandb from torch_geometric.data import DataLoader -from warpmesh.helper import load_yaml_to_namespace, save_namespace_to_yaml -from warpmesh.loader import AggreateDataset, MeshDataset, normalise -from warpmesh.model import ( +from UM2N.helper import load_yaml_to_namespace, save_namespace_to_yaml +from UM2N.loader import AggreateDataset, MeshDataset, normalise +from UM2N.model import ( M2N, M2N_T, M2T, @@ -36,7 +36,7 @@ parser = argparse.ArgumentParser( - prog="Warpmesh", description="warp the mesh", epilog="warp the mesh" + prog="UM2N", description="warp the mesh", epilog="warp the mesh" ) parser.add_argument("-config", default="", type=str, required=True) args = parser.parse_args() @@ -117,7 +117,7 @@ # =================== load from checkpoint ========================== if hasattr(config, "use_pre_train") and config.use_pre_train: - import warpmesh as wm + import UM2N # Load from checkpoint entity = "mz-team" @@ -153,7 +153,7 @@ # model_dict.update(pretrained_dict) # model.load_state_dict(model_dict) - model = wm.load_model(model, model_file_path, strict=False) + model = UM2N.load_model(model, model_file_path, strict=False) print(f"Model {run_id} checkpoint loaded.") else: print("No pre-train. 
Train from scratch.") diff --git a/run_train_fine_tune.py b/run_train_fine_tune.py index afbc45d..d684b1e 100644 --- a/run_train_fine_tune.py +++ b/run_train_fine_tune.py @@ -15,10 +15,10 @@ import wandb from torch_geometric.data import DataLoader -import warpmesh as wm -from warpmesh.helper import load_yaml_to_namespace, save_namespace_to_yaml -from warpmesh.loader import AggreateDataset, MeshDataset, normalise -from warpmesh.model import ( +import UM2N +from UM2N.helper import load_yaml_to_namespace, save_namespace_to_yaml +from UM2N.loader import AggreateDataset, MeshDataset, normalise +from UM2N.model import ( MRTransformer, evaluate_unsupervised, train_unsupervised, @@ -31,7 +31,7 @@ parser = argparse.ArgumentParser( - prog="Warpmesh", description="warp the mesh", epilog="warp the mesh" + prog="UM2N", description="warp the mesh", epilog="warp the mesh" ) parser.add_argument("-config", default="", type=str, required=True) args = parser.parse_args() @@ -91,7 +91,7 @@ target_file_name = file.name assert model_file is not None, "Model file not found" model_file_path = os.path.join(model_store_path, target_file_name) -model = wm.load_model(model, model_file_path, strict=False) +model = UM2N.load_model(model, model_file_path, strict=False) print("Model checkpoint loaded.") # =================================================================== diff --git a/script/build_burgers_square.py b/script/build_burgers_square.py index 2103eb2..287ae06 100644 --- a/script/build_burgers_square.py +++ b/script/build_burgers_square.py @@ -10,7 +10,7 @@ import matplotlib.pyplot as plt import pandas as pd -import warpmesh as wm +import UM2N def arg_parse(): @@ -300,7 +300,7 @@ def sample_from_loop( ): global i print("before processing") - mesh_processor = wm.MeshProcessor( + mesh_processor = UM2N.MeshProcessor( original_mesh=mesh_og, optimal_mesh=mesh_new, function_space=function_space, @@ -402,7 +402,7 @@ def sample_from_loop( mesh_new = None mesh_fine = None if mesh_type != 0: - unstructure_square_mesh_gen = wm.UnstructuredSquareMesh( + unstructure_square_mesh_gen = UM2N.UnstructuredSquareMesh( scale=scale_x, mesh_type=mesh_type ) # noqa mesh = unstructure_square_mesh_gen.get_mesh( @@ -418,7 +418,7 @@ def sample_from_loop( mesh_fine = fd.UnitSquareMesh(100, 100) # Generate Random solution field gaussian_list, nu = get_sample_param_of_nu_generalization_by_idx_train(idx) # noqa - solver = wm.BurgersSolver( + solver = UM2N.BurgersSolver( mesh, mesh_fine, mesh_new, gauss_list=gaussian_list, nu=nu, idx=idx ) solver.solve_problem(sample_from_loop) diff --git a/script/build_helmholtz_poly.py b/script/build_helmholtz_poly.py index 75cbec6..1d11c87 100644 --- a/script/build_helmholtz_poly.py +++ b/script/build_helmholtz_poly.py @@ -12,7 +12,7 @@ import numpy as np import pandas as pd -import warpmesh as wm +import UM2N def arg_parse(): @@ -216,13 +216,13 @@ def move_data(target, source, start, num_file): while i < n_samples: try: print("Generating Sample: " + str(i)) - rand_poly_mesh_gen = wm.RandPolyMesh(scale=scale_x, mesh_type=mesh_type) # noqa + rand_poly_mesh_gen = UM2N.RandPolyMesh(scale=scale_x, mesh_type=mesh_type) # noqa mesh = rand_poly_mesh_gen.get_mesh( res=lc, file_path=os.path.join(problem_mesh_dir, f"mesh{i}.msh") ) num_boundary = rand_poly_mesh_gen.num_boundary # Generate Random solution field - rand_u_generator = wm.RandSourceGenerator( + rand_u_generator = UM2N.RandSourceGenerator( use_iso=use_iso, dist_params={ "max_dist": max_dist, @@ -239,11 +239,11 @@ def move_data(target, source, start, 
num_file): "c_max": c_max, }, ) - helmholtz_eq = wm.RandHelmholtzEqGenerator(rand_u_generator) + helmholtz_eq = UM2N.RandHelmholtzEqGenerator(rand_u_generator) res = helmholtz_eq.discretise(mesh) # discretise the equation dist_params = rand_u_generator.get_dist_params() # Solve the equation - solver = wm.EquationSolver( + solver = UM2N.EquationSolver( params={ "function_space": res["function_space"], "LHS": res["LHS"], @@ -255,7 +255,7 @@ def move_data(target, source, start, num_file): f = fd.interpolate(helmholtz_eq.f, helmholtz_eq.function_space) uh = solver.solve_eq() # Generate Mesh - hessian = wm.MeshGenerator( + hessian = UM2N.MeshGenerator( params={ "eq": helmholtz_eq, "mesh": rand_poly_mesh_gen.get_mesh( @@ -264,7 +264,7 @@ def move_data(target, source, start, num_file): } ).get_hessian(mesh) - hessian_norm = wm.MeshGenerator( + hessian_norm = UM2N.MeshGenerator( params={ "eq": helmholtz_eq, "mesh": rand_poly_mesh_gen.get_mesh( @@ -278,7 +278,7 @@ def move_data(target, source, start, num_file): func_vec_space = fd.VectorFunctionSpace(mesh, "CG", 1) grad_uh_interpolate = fd.interpolate(fd.grad(uh), func_vec_space) - mesh_gen = wm.MeshGenerator( + mesh_gen = UM2N.MeshGenerator( params={ "eq": helmholtz_eq, "mesh": rand_poly_mesh_gen.get_mesh( @@ -304,7 +304,7 @@ def move_data(target, source, start, num_file): # solve the equation on the new mesh new_res = helmholtz_eq.discretise(new_mesh) - new_solver = wm.EquationSolver( + new_solver = UM2N.EquationSolver( params={ "function_space": new_res["function_space"], "LHS": new_res["LHS"], @@ -315,7 +315,7 @@ def move_data(target, source, start, num_file): uh_new = new_solver.solve_eq() # process the data for training - mesh_processor = wm.MeshProcessor( + mesh_processor = UM2N.MeshProcessor( original_mesh=mesh, optimal_mesh=new_mesh, function_space=new_res["function_space"], diff --git a/script/build_helmholtz_square.py b/script/build_helmholtz_square.py index 75e2401..447368d 100644 --- a/script/build_helmholtz_square.py +++ b/script/build_helmholtz_square.py @@ -11,7 +11,7 @@ import matplotlib.pyplot as plt import pandas as pd -import warpmesh as wm +import UM2N def arg_parse(): @@ -234,7 +234,7 @@ def move_data(target, source, start, num_file): try: print("Generating Sample: " + str(i)) if mesh_type != 0: - unstructure_square_mesh_gen = wm.UnstructuredSquareMesh( + unstructure_square_mesh_gen = UM2N.UnstructuredSquareMesh( scale=scale_x, mesh_type=mesh_type ) # noqa mesh = unstructure_square_mesh_gen.get_mesh( @@ -246,7 +246,7 @@ def move_data(target, source, start, num_file): mesh = fd.UnitSquareMesh(n_grid, n_grid) # Generate Random solution field - rand_u_generator = wm.RandSourceGenerator( + rand_u_generator = UM2N.RandSourceGenerator( use_iso=use_iso, dist_params={ "max_dist": max_dist, @@ -266,11 +266,11 @@ def move_data(target, source, start, num_file): "sigma_eps": sigma_eps, }, ) - helmholtz_eq = wm.RandHelmholtzEqGenerator(rand_u_generator) + helmholtz_eq = UM2N.RandHelmholtzEqGenerator(rand_u_generator) res = helmholtz_eq.discretise(mesh) # discretise the equation dist_params = rand_u_generator.get_dist_params() # Solve the equation - solver = wm.EquationSolver( + solver = UM2N.EquationSolver( params={ "function_space": res["function_space"], "LHS": res["LHS"], @@ -284,21 +284,21 @@ def move_data(target, source, start, num_file): # plt.show() uh = solver.solve_eq() # Generate Mesh - hessian = wm.MeshGenerator( + hessian = UM2N.MeshGenerator( params={"eq": helmholtz_eq, "mesh": mesh} ).get_hessian(mesh) - hessian_norm = 
wm.MeshGenerator( + hessian_norm = UM2N.MeshGenerator( params={"eq": helmholtz_eq, "mesh": mesh} ).get_hessian_norm(mesh) hessian_norm = fd.project(hessian_norm, fd.FunctionSpace(mesh, "CG", 1)) # Get monitor val - monitor_val = wm.MeshGenerator( + monitor_val = UM2N.MeshGenerator( params={"eq": helmholtz_eq, "mesh": mesh} ).monitor_func(mesh) - # grad_uh_norm = wm.MeshGenerator( + # grad_uh_norm = UM2N.MeshGenerator( # params={ # "eq": helmholtz_eq, # "mesh": fd.Mesh( @@ -315,7 +315,7 @@ def move_data(target, source, start, num_file): grad_norm /= grad_norm.vector().max() grad_uh_norm = grad_norm - mesh_gen = wm.MeshGenerator(params={"eq": helmholtz_eq, "mesh": mesh}) + mesh_gen = UM2N.MeshGenerator(params={"eq": helmholtz_eq, "mesh": mesh}) start = time.perf_counter() new_mesh = mesh_gen.move_mesh() # noqa @@ -343,7 +343,7 @@ def move_data(target, source, start, num_file): # solve the equation on the new mesh new_res = helmholtz_eq.discretise(new_mesh) - new_solver = wm.EquationSolver( + new_solver = UM2N.EquationSolver( params={ "function_space": new_res["function_space"], "LHS": new_res["LHS"], @@ -354,7 +354,7 @@ def move_data(target, source, start, num_file): uh_new = new_solver.solve_eq() # process the data for training - mesh_processor = wm.MeshProcessor( + mesh_processor = UM2N.MeshProcessor( original_mesh=mesh, optimal_mesh=new_mesh, function_space=new_res["function_space"], diff --git a/script/build_poisson_poly.py b/script/build_poisson_poly.py index a7405a0..03a0fcf 100644 --- a/script/build_poisson_poly.py +++ b/script/build_poisson_poly.py @@ -12,7 +12,7 @@ import numpy as np import pandas as pd -import warpmesh as wm +import UM2N def arg_parse(): @@ -216,13 +216,13 @@ def move_data(target, source, start, num_file): while i < n_samples: try: print("Generating Sample: " + str(i)) - rand_poly_mesh_gen = wm.RandPolyMesh(scale=scale_x, mesh_type=mesh_type) # noqa + rand_poly_mesh_gen = UM2N.RandPolyMesh(scale=scale_x, mesh_type=mesh_type) # noqa mesh = rand_poly_mesh_gen.get_mesh( res=lc, file_path=os.path.join(problem_mesh_dir, f"mesh{i}.msh") ) num_boundary = rand_poly_mesh_gen.num_boundary # Generate Random solution field - rand_u_generator = wm.RandSourceGenerator( + rand_u_generator = UM2N.RandSourceGenerator( use_iso=use_iso, dist_params={ "max_dist": max_dist, @@ -239,11 +239,11 @@ def move_data(target, source, start, num_file): "c_max": c_max, }, ) - poisson_eq = wm.RandPoissonEqGenerator(rand_u_generator) + poisson_eq = UM2N.RandPoissonEqGenerator(rand_u_generator) res = poisson_eq.discretise(mesh) # discretise the equation dist_params = rand_u_generator.get_dist_params() # Solve the equation - solver = wm.EquationSolver( + solver = UM2N.EquationSolver( params={ "function_space": res["function_space"], "LHS": res["LHS"], @@ -253,7 +253,7 @@ def move_data(target, source, start, num_file): ) uh = solver.solve_eq() # Generate Mesh - hessian = wm.MeshGenerator( + hessian = UM2N.MeshGenerator( params={ "eq": poisson_eq, "mesh": rand_poly_mesh_gen.get_mesh( @@ -262,7 +262,7 @@ def move_data(target, source, start, num_file): } ).get_hessian(mesh) - hessian_norm = wm.MeshGenerator( + hessian_norm = UM2N.MeshGenerator( params={ "eq": poisson_eq, "mesh": rand_poly_mesh_gen.get_mesh( @@ -276,7 +276,7 @@ def move_data(target, source, start, num_file): func_vec_space = fd.VectorFunctionSpace(mesh, "CG", 1) grad_uh_interpolate = fd.interpolate(fd.grad(uh), func_vec_space) - mesh_gen = wm.MeshGenerator( + mesh_gen = UM2N.MeshGenerator( params={ "eq": poisson_eq, "mesh": 
rand_poly_mesh_gen.get_mesh( @@ -302,7 +302,7 @@ def move_data(target, source, start, num_file): # solve the equation on the new mesh new_res = poisson_eq.discretise(new_mesh) - new_solver = wm.EquationSolver( + new_solver = UM2N.EquationSolver( params={ "function_space": new_res["function_space"], "LHS": new_res["LHS"], @@ -313,7 +313,7 @@ def move_data(target, source, start, num_file): uh_new = new_solver.solve_eq() # process the data for training - mesh_processor = wm.MeshProcessor( + mesh_processor = UM2N.MeshProcessor( original_mesh=mesh, optimal_mesh=new_mesh, function_space=new_res["function_space"], diff --git a/script/build_poisson_square.py b/script/build_poisson_square.py index d5c424d..0051279 100644 --- a/script/build_poisson_square.py +++ b/script/build_poisson_square.py @@ -11,7 +11,7 @@ import matplotlib.pyplot as plt import pandas as pd -import warpmesh as wm +import UM2N def arg_parse(): @@ -214,14 +214,14 @@ def move_data(target, source, start, num_file): while i < n_samples: try: print("Generating Sample: " + str(i)) - unstructure_square_mesh_gen = wm.UnstructuredSquareMesh( + unstructure_square_mesh_gen = UM2N.UnstructuredSquareMesh( scale=scale_x, mesh_type=mesh_type ) # noqa mesh = unstructure_square_mesh_gen.get_mesh( res=lc, file_path=os.path.join(problem_mesh_dir, f"mesh{i}.msh") ) # Generate Random solution field - rand_u_generator = wm.RandSourceGenerator( + rand_u_generator = UM2N.RandSourceGenerator( use_iso=use_iso, dist_params={ "max_dist": max_dist, @@ -238,11 +238,11 @@ def move_data(target, source, start, num_file): "c_max": c_max, }, ) - poisson_eq = wm.RandPoissonEqGenerator(rand_u_generator) + poisson_eq = UM2N.RandPoissonEqGenerator(rand_u_generator) res = poisson_eq.discretise(mesh) # discretise the equation dist_params = rand_u_generator.get_dist_params() # Solve the equation - solver = wm.EquationSolver( + solver = UM2N.EquationSolver( params={ "function_space": res["function_space"], "LHS": res["LHS"], @@ -252,14 +252,14 @@ def move_data(target, source, start, num_file): ) uh = solver.solve_eq() # Generate Mesh - hessian = wm.MeshGenerator( + hessian = UM2N.MeshGenerator( params={ "eq": poisson_eq, "mesh": fd.Mesh(os.path.join(problem_mesh_dir, f"mesh{i}.msh")), # noqa } ).get_hessian(mesh) - hessian_norm = wm.MeshGenerator( + hessian_norm = UM2N.MeshGenerator( params={ "eq": poisson_eq, "mesh": fd.Mesh(os.path.join(problem_mesh_dir, f"mesh{i}.msh")), # noqa @@ -271,7 +271,7 @@ def move_data(target, source, start, num_file): func_vec_space = fd.VectorFunctionSpace(mesh, "CG", 1) grad_uh_interpolate = fd.interpolate(fd.grad(uh), func_vec_space) - mesh_gen = wm.MeshGenerator( + mesh_gen = UM2N.MeshGenerator( params={ "eq": poisson_eq, "mesh": fd.Mesh(os.path.join(problem_mesh_dir, f"mesh{i}.msh")), # noqa @@ -301,7 +301,7 @@ def move_data(target, source, start, num_file): # solve the equation on the new mesh new_res = poisson_eq.discretise(new_mesh) - new_solver = wm.EquationSolver( + new_solver = UM2N.EquationSolver( params={ "function_space": new_res["function_space"], "LHS": new_res["LHS"], @@ -312,7 +312,7 @@ def move_data(target, source, start, num_file): uh_new = new_solver.solve_eq() # process the data for training - mesh_processor = wm.MeshProcessor( + mesh_processor = UM2N.MeshProcessor( original_mesh=mesh, optimal_mesh=new_mesh, function_space=new_res["function_space"], diff --git a/script/build_swirl.py b/script/build_swirl.py index 49ec794..3c9ebf1 100644 --- a/script/build_swirl.py +++ b/script/build_swirl.py @@ -9,7 +9,7 @@ import 
matplotlib.pyplot as plt import pandas as pd -import warpmesh as wm +import UM2N def arg_parse(): @@ -230,7 +230,7 @@ def sample_from_loop( """ global i print("before processing") - mesh_processor = wm.MeshProcessor( + mesh_processor = UM2N.MeshProcessor( original_mesh=mesh_og, optimal_mesh=mesh_new, function_space=function_space, @@ -418,7 +418,7 @@ def sample_from_loop( mesh_fine = None mesh_new = None if mesh_type != 0: - mesh_gen = wm.UnstructuredSquareMesh(mesh_type=mesh_type) + mesh_gen = UM2N.UnstructuredSquareMesh(mesh_type=mesh_type) mesh = mesh_gen.get_mesh( res=lc, file_path=os.path.join(problem_mesh_dir, "mesh.msh") ) @@ -428,7 +428,7 @@ def sample_from_loop( mesh_model = mesh_gen.get_mesh( res=lc, file_path=os.path.join(problem_mesh_dir, "mesh.msh") ) - mesh_gen_fine = wm.UnstructuredSquareMesh(mesh_type=mesh_type) + mesh_gen_fine = UM2N.UnstructuredSquareMesh(mesh_type=mesh_type) mesh_fine = mesh_gen_fine.get_mesh( res=1e-2, file_path=os.path.join(problem_mesh_fine_dir, "mesh.msh") ) @@ -459,7 +459,7 @@ def sample_from_loop( df.to_csv(os.path.join(problem_specific_dir, "info.csv")) # solver defination - swril_solver = wm.SwirlSolver( + swirl_solver = UM2N.SwirlSolver( mesh, mesh_fine, mesh_new, @@ -476,7 +476,7 @@ def sample_from_loop( n_monitor_smooth=n_monitor_smooth, ) - swril_solver.solve_problem(callback=sample_from_loop, fail_callback=fail_callback) + swirl_solver.solve_problem(callback=sample_from_loop, fail_callback=fail_callback) print("Done!") # ==== Data Generation Scripts ====================== diff --git a/script/compare.py b/script/compare.py index cb56c4e..5d18c20 100644 --- a/script/compare.py +++ b/script/compare.py @@ -9,7 +9,7 @@ import torch from torch_geometric.data import DataLoader # noqa -import warpmesh as wm +import UM2N warnings.filterwarnings("ignore") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") @@ -23,21 +23,21 @@ # MRN_path = "/Users/cw1722/Downloads/MRN_r=5_15,20__smpl/weight/model_999.pth" # noqa MRN_path = "/Users/cw1722/Downloads/MRN_r=5__15,20__cmplx/weight/model_999.pth" # noqa -model_M2N = wm.M2N( +model_M2N = UM2N.M2N( deform_in_c=7, gfe_in_c=2, lfe_in_c=4, ).to(device) -model_M2N = wm.load_model(model_M2N, M2N_weight_path) +model_M2N = UM2N.load_model(model_M2N, M2N_weight_path) -model_MRN = wm.MRN( +model_MRN = UM2N.MRN( deform_in_c=7, gfe_in_c=2, lfe_in_c=4, num_loop=5, ).to(device) model_M2N.eval() -model_MRN = wm.load_model(model_MRN, MRN_path) +model_MRN = UM2N.load_model(model_MRN, MRN_path) model_MRN.eval() # %% dataset load @@ -62,9 +62,9 @@ ] normalise = True loss_func = torch.nn.L1Loss() -data_set = wm.MeshDataset( +data_set = UM2N.MeshDataset( data_dir, - transform=wm.normalise if normalise else None, + transform=UM2N.normalise if normalise else None, x_feature=x_feat, mesh_feature=mesh_feat, conv_feature=conv_feat, @@ -84,7 +84,7 @@ def compare_on_dataset(model, dataset): acceleration = [] for data_idx in range(len(dataset)): data = dataset[data_idx] - res = wm.compare_error(model, data, plot=True, n_elem=n_elem) + res = UM2N.compare_error(model, data, plot=True, n_elem=n_elem) tangle.append(res["tangle_num"]) error_og.append(res["error_original_mesh"]) error_ma.append(res["error_ma_mesh"]) @@ -180,7 +180,7 @@ def remove_elems(res): # %% plot sample from m2n data = data_set[2] -wm.compare_error(model_MRN, data, plot=True, n_elem=n_elem) +UM2N.compare_error(model_MRN, data, plot=True, n_elem=n_elem) # %% plot model training loss, test tangle loss_m2n_path = 
"/Users/cw1722/Downloads/M2N__15,20__cmplx/train_log/loss.csv" @@ -348,9 +348,9 @@ def summrise_info(res): ] data_sets = [ - wm.MeshDataset( + UM2N.MeshDataset( data_dir, - transform=wm.normalise if normalise else None, + transform=UM2N.normalise if normalise else None, x_feature=x_feat, mesh_feature=mesh_feat, conv_feature=conv_feat, diff --git a/script/evaluate.py b/script/evaluate.py index 3a26a64..4f13c6e 100644 --- a/script/evaluate.py +++ b/script/evaluate.py @@ -12,8 +12,8 @@ import wandb from torch_geometric.loader import DataLoader -import warpmesh as wm -from warpmesh.model.train_util import model_forward +import UM2N +from UM2N.model.train_util import model_forward device = torch.device("cuda" if torch.cuda.is_available() else "cpu") @@ -100,7 +100,7 @@ def init_dir(config, run_id, epoch, ds_root, problem_type, domain): f"{ds_name}", now, ) - wm.mkdir_if_not_exist(experiment_dir) + UM2N.mkdir_if_not_exist(experiment_dir) print("\t## Make eval dir done\n") return experiment_dir @@ -119,9 +119,9 @@ def load_dataset( use_cluster: flag controling whether to use cluset in training. """ dataset_path = os.path.join(ds_root, tar_folder) - dataset = wm.MeshDataset( + dataset = UM2N.MeshDataset( dataset_path, - transform=wm.normalise if wm.normalise else None, + transform=UM2N.normalise if UM2N.normalise else None, x_feature=config.x_feat, mesh_feature=config.mesh_feat, conv_feature=config.conv_feat, @@ -163,20 +163,20 @@ def load_model(config, epoch, experiment_dir): assert model_file is not None, "Model file not found" model = None if config.model_used == "M2N": - model = wm.M2N( + model = UM2N.M2N( gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, deform_in_c=config.num_deform_in, ) elif config.model_used == "MRN": - model = wm.MRN( + model = UM2N.MRN( gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, deform_in_c=config.num_deform_in, num_loop=config.num_deformer_loop, ) elif config.model_used == "MRT" or config.model_used == "MRTransformer": - model = wm.MRTransformer( + model = UM2N.MRTransformer( num_transformer_in=config.num_transformer_in, num_transformer_out=config.num_transformer_out, num_transformer_embed_dim=config.num_transformer_embed_dim, @@ -191,7 +191,7 @@ def load_model(config, epoch, experiment_dir): device=device, ) elif config.model_used == "M2T": - model = wm.M2T( + model = UM2N.M2T( num_transformer_in=config.num_transformer_in, num_transformer_out=config.num_transformer_out, num_transformer_embed_dim=config.num_transformer_embed_dim, @@ -207,7 +207,7 @@ def load_model(config, epoch, experiment_dir): device=device, ) elif config.model_used == "M2N_T": - model = wm.M2N_T( + model = UM2N.M2N_T( deform_in_c=config.num_deform_in, gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, @@ -215,7 +215,7 @@ def load_model(config, epoch, experiment_dir): else: print("Model not found") model_file_path = os.path.join(experiment_dir, target_file_name) - model = wm.load_model(model, model_file_path) + model = UM2N.load_model(model, model_file_path) return model @@ -284,10 +284,10 @@ def benchmark_model(model, dataset, eval_dir, ds_root, start_idx=0, num_samples= plot_more_dir = os.path.join(eval_dir, "plot_more") plot_data_dir = os.path.join(eval_dir, "plot_data") print("log_dir issss", log_dir) - wm.mkdir_if_not_exist(log_dir) - wm.mkdir_if_not_exist(plot_dir) - wm.mkdir_if_not_exist(plot_more_dir) - wm.mkdir_if_not_exist(plot_data_dir) + UM2N.mkdir_if_not_exist(log_dir) + UM2N.mkdir_if_not_exist(plot_dir) + UM2N.mkdir_if_not_exist(plot_more_dir) + 
UM2N.mkdir_if_not_exist(plot_data_dir) model = model.to(device) total_num = len(dataset) @@ -329,7 +329,7 @@ def benchmark_model(model, dataset, eval_dir, ds_root, start_idx=0, num_samples= end = time.perf_counter() dur_ms = (end - start) * 1000 temp_time_consumption = dur_ms - temp_tangled_elem = wm.get_sample_tangle(out, sample.y, sample.face) + temp_tangled_elem = UM2N.get_sample_tangle(out, sample.y, sample.face) temp_loss = 1000 * torch.nn.L1Loss()(out, sample.y) # define mesh & fine mesh for comparison if domain == "square": @@ -360,7 +360,7 @@ def benchmark_model(model, dataset, eval_dir, ds_root, start_idx=0, num_samples= ) mesh_model.coordinates.dat.data[:] = out.detach().cpu().numpy() - compare_res = wm.compare_error( + compare_res = UM2N.compare_error( sample, mesh, mesh_fine, @@ -422,7 +422,7 @@ def benchmark_model(model, dataset, eval_dir, ds_root, start_idx=0, num_samples= ) log_df.to_csv(os.path.join(log_dir, f"log_{idx:04d}.csv")) - fig = wm.plot_mesh_compare_benchmark( + fig = UM2N.plot_mesh_compare_benchmark( out.cpu(), sample.y.cpu(), sample.face.cpu(), @@ -470,7 +470,7 @@ def benchmark_model(model, dataset, eval_dir, ds_root, start_idx=0, num_samples= mesh_fine = fd.UnitSquareMesh(100, 100) # solver defination - swril_solver = wm.SwirlSolver( + swril_solver = UM2N.SwirlSolver( mesh, mesh_fine, mesh_ma, @@ -487,7 +487,7 @@ def benchmark_model(model, dataset, eval_dir, ds_root, start_idx=0, num_samples= n_step=n_step, ) - # evaluator = wm.SwirlEvaluator( + # evaluator = UM2N.SwirlEvaluator( # mesh, # mesh_coarse, # mesh_fine, @@ -530,7 +530,7 @@ def benchmark_model(model, dataset, eval_dir, ds_root, start_idx=0, num_samples= mesh_new = fd.Mesh(os.path.join(ds_root, "mesh", "mesh.msh")) mesh_fine = fd.Mesh(os.path.join(ds_root, "mesh_fine", "mesh.msh")) - evaluator = wm.BurgersEvaluator( + evaluator = UM2N.BurgersEvaluator( mesh, mesh_fine, mesh_new, @@ -667,7 +667,7 @@ def write_sumo(eval_dir, ds_root): fig.suptitle(f"{fig_title}", fontsize=16) fig.savefig(os.path.join(summary_save_path, "error_reduction_sumo.png")) - big_df_res = wm.write_stat(eval_dir) + big_df_res = UM2N.write_stat(eval_dir) big_df_res["fig"].savefig(os.path.join(summary_save_path, "error_hist.png")) # noqa big_df_res["df"].to_csv(os.path.join(summary_save_path, "all_info.csv")) diff --git a/script/evaluate_burgers.py b/script/evaluate_burgers.py index 11f2c5f..9dd85bc 100644 --- a/script/evaluate_burgers.py +++ b/script/evaluate_burgers.py @@ -12,7 +12,7 @@ import torch import wandb -import warpmesh as wm +import UM2N os.environ["OMP_NUM_THREADS"] = "1" device = torch.device("cuda" if torch.cuda.is_available() else "cpu") @@ -116,7 +116,7 @@ def init_dir(config): + "_" + run_id, ) - wm.mkdir_if_not_exist(experiment_dir) + UM2N.mkdir_if_not_exist(experiment_dir) print("\t## Make eval dir done\n") return experiment_dir @@ -135,9 +135,9 @@ def load_dataset( use_cluster: flag controling whether to use cluset in training. 
""" dataset_path = os.path.join(ds_root, tar_folder) - dataset = wm.MeshDataset( + dataset = UM2N.MeshDataset( dataset_path, - transform=wm.normalise if wm.normalise else None, + transform=UM2N.normalise if UM2N.normalise else None, x_feature=config.x_feat, mesh_feature=config.mesh_feat, conv_feature=config.conv_feat, @@ -178,20 +178,20 @@ def load_model(config, epoch, experiment_dir): assert model_file is not None, "Model file not found" model = None if config.model_used == "M2N": - model = wm.M2N( + model = UM2N.M2N( gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, deform_in_c=config.num_deform_in, ) elif config.model_used == "MRN": - model = wm.MRN( + model = UM2N.MRN( gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, deform_in_c=config.num_deform_in, num_loop=config.num_deformer_loop, ) elif config.model_used == "MRT": - model = wm.MRTransformer( + model = UM2N.MRTransformer( num_transformer_in=config.num_transformer_in, num_transformer_out=config.num_transformer_out, num_transformer_embed_dim=config.num_transformer_embed_dim, @@ -207,7 +207,7 @@ def load_model(config, epoch, experiment_dir): else: print("Model not found") model_file_path = os.path.join(experiment_dir, target_file_name) - model = wm.load_model(model, model_file_path) + model = UM2N.load_model(model, model_file_path) return model @@ -219,7 +219,7 @@ def benchmark_model(model, dataset, eval_dir, ds_root, case_idxs): mesh_new = fd.Mesh(os.path.join(ds_root, "mesh", "mesh.msh")) mesh_fine = fd.Mesh(os.path.join(ds_root, "mesh_fine", "mesh.msh")) - evaluator = wm.BurgersEvaluator( + evaluator = UM2N.BurgersEvaluator( mesh, mesh_fine, mesh_new, diff --git a/script/evaluate_model.py b/script/evaluate_model.py index 17a4397..2ed4e0e 100644 --- a/script/evaluate_model.py +++ b/script/evaluate_model.py @@ -21,7 +21,7 @@ import torch from torch_geometric.data import DataLoader # noqa -import warpmesh as wm +import UM2N torch.no_grad() warnings.filterwarnings("ignore") @@ -51,10 +51,10 @@ ] # %% load model -model = wm.M2N_og( +model = UM2N.M2N_og( deform_in_c=7, gfe_in_c=1, lfe_in_c=3, ).to(device) -model = wm.load_model(model, model_weight_path) +model = UM2N.load_model(model, model_weight_path) model.eval() diff --git a/script/evaluate_swirl.py b/script/evaluate_swirl.py index f2aa5c4..92a8a29 100644 --- a/script/evaluate_swirl.py +++ b/script/evaluate_swirl.py @@ -12,7 +12,7 @@ import torch import wandb -import warpmesh as wm +import UM2N os.environ["OMP_NUM_THREADS"] = "1" device = torch.device("cuda" if torch.cuda.is_available() else "cpu") @@ -44,7 +44,7 @@ def init_dir(config): eval_dir, "swirl" + "_" + now + "_" + config.model_used + "_" + str(epoch) + "_" + run_id, ) - wm.mkdir_if_not_exist(experiment_dir) + UM2N.mkdir_if_not_exist(experiment_dir) print("\t## Make eval dir done\n") return experiment_dir @@ -63,9 +63,9 @@ def load_dataset( use_cluster: flag controling whether to use cluset in training. 
""" dataset_path = os.path.join(ds_root, tar_folder) - dataset = wm.MeshDataset( + dataset = UM2N.MeshDataset( dataset_path, - transform=wm.normalise if wm.normalise else None, + transform=UM2N.normalise if UM2N.normalise else None, x_feature=config.x_feat, mesh_feature=config.mesh_feat, conv_feature=config.conv_feat, @@ -106,20 +106,20 @@ def load_model(config, epoch, experiment_dir): assert model_file is not None, "Model file not found" model = None if config.model_used == "M2N": - model = wm.M2N( + model = UM2N.M2N( gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, deform_in_c=config.num_deform_in, ) elif config.model_used == "MRN": - model = wm.MRN( + model = UM2N.MRN( gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, deform_in_c=config.num_deform_in, num_loop=config.num_deformer_loop, ) elif config.model_used == "MRT": - model = wm.MRTransformer( + model = UM2N.MRTransformer( num_transformer_in=config.num_transformer_in, num_transformer_out=config.num_transformer_out, num_transformer_embed_dim=config.num_transformer_embed_dim, @@ -136,7 +136,7 @@ def load_model(config, epoch, experiment_dir): else: print("Model not found") model_file_path = os.path.join(experiment_dir, target_file_name) - model = wm.load_model(model, model_file_path) + model = UM2N.load_model(model, model_file_path) return model @@ -156,7 +156,7 @@ def benchmark_model(model, dataset, eval_dir, ds_root): # fd.triplot(mesh) # fd.triplot(mesh_fine) - evaluator = wm.SwirlEvaluator( + evaluator = UM2N.SwirlEvaluator( mesh, mesh_fine, mesh_new, diff --git a/script/gradual_change.py b/script/gradual_change.py index 2f363a1..763dccc 100644 --- a/script/gradual_change.py +++ b/script/gradual_change.py @@ -10,7 +10,7 @@ import torch from torch_geometric.data import DataLoader # noqa -import warpmesh as wm +import UM2N torch.no_grad() warnings.filterwarnings("ignore") # noqa @@ -24,20 +24,20 @@ # MRN_path = "/Users/cw1722/Downloads/MRN_r=5_15,20__smpl/weight/model_999.pth" # noqa MRN_path = "/Users/cw1722/Downloads/MRN_r=5__15,20__cmplx/weight/model_999.pth" # noqa -model_M2N = wm.M2N( +model_M2N = UM2N.M2N( deform_in_c=7, gfe_in_c=2, lfe_in_c=4, ).to(device) -model_M2N = wm.load_model(model_M2N, M2N_weight_path) +model_M2N = UM2N.load_model(model_M2N, M2N_weight_path) -model_MRN = wm.MRN( +model_MRN = UM2N.MRN( deform_in_c=7, gfe_in_c=2, lfe_in_c=4, num_loop=5, ).to(device) -model_MRN = wm.load_model(model_MRN, MRN_path) +model_MRN = UM2N.load_model(model_MRN, MRN_path) # %% dataset load x_feat = [ @@ -61,9 +61,9 @@ ] normalise = True loss_func = torch.nn.L1Loss() -data_set = wm.MeshDataset( +data_set = UM2N.MeshDataset( data_dir, - transform=wm.normalise if normalise else None, + transform=UM2N.normalise if normalise else None, x_feature=x_feat, mesh_feature=mesh_feat, conv_feature=conv_feat, @@ -112,9 +112,9 @@ def plot_gradual_change(idx, data_set=data_set, model=model_MRN): ] data_sets = [ - wm.MeshDataset( + UM2N.MeshDataset( data_dir, - transform=wm.normalise if normalise else None, + transform=UM2N.normalise if normalise else None, x_feature=x_feat, mesh_feature=mesh_feat, conv_feature=conv_feat, diff --git a/script/test_helmholtz_square.py b/script/test_helmholtz_square.py index c994222..d2244e8 100644 --- a/script/test_helmholtz_square.py +++ b/script/test_helmholtz_square.py @@ -13,7 +13,7 @@ import pandas as pd import wandb -import warpmesh as wm +import UM2N entity = "w-chunyang" project_name = "warpmesh" @@ -208,7 +208,7 @@ def move_data(target, source, start, num_file): print("Generating Sample: " + 
str(i)) mesh = fd.RectangleMesh(num_grid_x, num_grid_y, scale_x, scale_y) # Generate Random solution field - rand_u_generator = wm.RandSourceGenerator( + rand_u_generator = UM2N.RandSourceGenerator( use_iso=use_iso, dist_params={ "max_dist": max_dist, @@ -225,12 +225,12 @@ def move_data(target, source, start, num_file): "c_max": c_max, }, ) - helmholtz_eq = wm.RandHelmholtzEqGenerator(rand_u_generator) + helmholtz_eq = UM2N.RandHelmholtzEqGenerator(rand_u_generator) res = helmholtz_eq.discretise(mesh) # discretise the equation dist_params = rand_u_generator.get_dist_params() print("dist_params", dist_params) # Solve the equation - solver = wm.EquationSolver( + solver = UM2N.EquationSolver( params={ "function_space": res["function_space"], "LHS": res["LHS"], @@ -240,7 +240,7 @@ def move_data(target, source, start, num_file): ) uh = solver.solve_eq() # Generate Mesh - hessian = wm.MeshGenerator( + hessian = UM2N.MeshGenerator( params={ "num_grid_x": num_grid_x, "num_grid_y": num_grid_y, @@ -249,7 +249,7 @@ def move_data(target, source, start, num_file): } ).get_hessian(mesh) - hessian_norm = wm.MeshGenerator( + hessian_norm = UM2N.MeshGenerator( params={ "num_grid_x": num_grid_x, "num_grid_y": num_grid_y, @@ -263,7 +263,7 @@ def move_data(target, source, start, num_file): func_vec_space = fd.VectorFunctionSpace(mesh, "CG", 1) grad_uh_interpolate = fd.interpolate(fd.grad(uh), func_vec_space) - mesh_gen = wm.MeshGenerator( + mesh_gen = UM2N.MeshGenerator( params={ "num_grid_x": num_grid_x, "num_grid_y": num_grid_y, @@ -295,7 +295,7 @@ def move_data(target, source, start, num_file): # solve the equation on the new mesh new_res = helmholtz_eq.discretise(new_mesh) - new_solver = wm.EquationSolver( + new_solver = UM2N.EquationSolver( params={ "function_space": new_res["function_space"], "LHS": new_res["LHS"], @@ -306,7 +306,7 @@ def move_data(target, source, start, num_file): uh_new = new_solver.solve_eq() # process the data for training - mesh_processor = wm.MeshProcessor( + mesh_processor = UM2N.MeshProcessor( original_mesh=mesh, optimal_mesh=new_mesh, function_space=new_res["function_space"], @@ -388,9 +388,9 @@ def move_data(target, source, start, num_file): run = api.run(f"{entity}/{project_name}/{run_id}") config = SimpleNamespace(**run.config) dataset_path = problem_data_dir - dataset = wm.MeshDataset( + dataset = UM2N.MeshDataset( dataset_path, - transform=wm.normalise if wm.normalise else None, + transform=UM2N.normalise if UM2N.normalise else None, x_feature=config.x_feat, mesh_feature=config.mesh_feat, conv_feature=config.conv_feat, @@ -404,7 +404,7 @@ def move_data(target, source, start, num_file): add_nei=True, ) print(dataset[i].dist_params) - compare_res = wm.compare_error(dataset[i], mesh, high_res_mesh, new_mesh, 0) + compare_res = UM2N.compare_error(dataset[i], mesh, high_res_mesh, new_mesh, 0) print("compare_res", compare_res) diff --git a/script/train_model.py b/script/train_model.py index 3620149..48aa4fc 100644 --- a/script/train_model.py +++ b/script/train_model.py @@ -13,7 +13,7 @@ from rich.progress import Progress from torch_geometric.data import DataLoader -import warpmesh as wm +import UM2N warnings.filterwarnings("ignore") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") @@ -31,8 +31,8 @@ def weighted_mse(out, data, weight): use_jacob = True -# model = wm.M2N(gfe_in_c=2, lfe_in_c=4) -model = wm.MRN(gfe_in_c=2, lfe_in_c=4) +# model = UM2N.M2N(gfe_in_c=2, lfe_in_c=4) +model = UM2N.MRN(gfe_in_c=2, lfe_in_c=4) weight_decay = 5e-4 train_batch_size = 20 
@@ -99,9 +99,9 @@ def weighted_mse(out, data, weight): trainlog_dir = os.path.join(output_dir, "train_log") weight_dir = os.path.join(output_dir, "weight") -wm.mkdir_if_not_exist(prediction_dir) -wm.mkdir_if_not_exist(trainlog_dir) -wm.mkdir_if_not_exist(weight_dir) +UM2N.mkdir_if_not_exist(prediction_dir) +UM2N.mkdir_if_not_exist(trainlog_dir) +UM2N.mkdir_if_not_exist(weight_dir) df = pd.DataFrame( { @@ -135,23 +135,23 @@ def weighted_mse(out, data, weight): # ================LOAD DATA==================================== -train_set = wm.MeshDataset( +train_set = UM2N.MeshDataset( os.path.join(data_path, "train"), - transform=wm.normalise if normalise else None, + transform=UM2N.normalise if normalise else None, x_feature=x_feat, mesh_feature=mesh_feat, conv_feature=conv_feat, ) -test_set = wm.MeshDataset( +test_set = UM2N.MeshDataset( os.path.join(data_path, "test"), - transform=wm.normalise if normalise else None, + transform=UM2N.normalise if normalise else None, x_feature=x_feat, mesh_feature=mesh_feat, conv_feature=conv_feat, ) -val_set = wm.MeshDataset( +val_set = UM2N.MeshDataset( os.path.join(data_path, "val"), - transform=wm.normalise if normalise else None, + transform=UM2N.normalise if normalise else None, x_feature=x_feat, mesh_feature=mesh_feat, conv_feature=conv_feat, @@ -200,7 +200,7 @@ def weighted_mse(out, data, weight): for epoch in range(num_epochs): progress.update(task, advance=1) - train_loss = wm.train( + train_loss = UM2N.train( train_loader, model, optimizer, @@ -208,12 +208,12 @@ def weighted_mse(out, data, weight): loss_func=loss_func, use_jacob=use_jacob, ) - test_loss = wm.evaluate( + test_loss = UM2N.evaluate( test_loader, model, device, loss_func=loss_func, use_jacob=use_jacob ) - train_tangle = wm.check_dataset_tangle(train_set, model, n_elem_x, n_elem_y) - test_tangle = wm.check_dataset_tangle(test_set, model, n_elem_x, n_elem_y) + train_tangle = UM2N.check_dataset_tangle(train_set, model, n_elem_x, n_elem_y) + test_tangle = UM2N.check_dataset_tangle(test_set, model, n_elem_x, n_elem_y) train_loss_arr.append(train_loss) test_loss_arr.append(test_loss) @@ -240,7 +240,7 @@ def weighted_mse(out, data, weight): ) # plot prediction - wm.plot_prediction( + UM2N.plot_prediction( val_set, model, prediction_dir, @@ -249,7 +249,7 @@ def weighted_mse(out, data, weight): n_elem_x=n_elem_x, n_elem_y=n_elem_y, ) - wm.plot_prediction( + UM2N.plot_prediction( test_set, model, prediction_dir, @@ -260,11 +260,11 @@ def weighted_mse(out, data, weight): ) # plot traing curve - fig = wm.plot_loss(train_loss_arr, test_loss_arr, epoch_arr) + fig = UM2N.plot_loss(train_loss_arr, test_loss_arr, epoch_arr) fig.savefig(os.path.join(trainlog_dir, "loss.png")) # plot avg tangle curve - fig = wm.plot_tangle(train_tangle_arr, test_tangle_arr, epoch_arr) + fig = UM2N.plot_tangle(train_tangle_arr, test_tangle_arr, epoch_arr) fig.savefig(os.path.join(trainlog_dir, "tangle.png")) # write final loss diff --git a/script/train_warpmesh.ipynb b/script/train_um2n.ipynb similarity index 100% rename from script/train_warpmesh.ipynb rename to script/train_um2n.ipynb diff --git a/script/vis_dataset.py b/script/vis_dataset.py index d7691cd..c2847b0 100644 --- a/script/vis_dataset.py +++ b/script/vis_dataset.py @@ -6,7 +6,7 @@ import matplotlib.patches as patches import matplotlib.pyplot as plt -import warpmesh as wm +import UM2N x_feat = [ "coord", @@ -46,9 +46,9 @@ ) print(data_dir) -data_set = wm.MeshDataset( +data_set = UM2N.MeshDataset( data_dir, - transform=wm.normalise if normalise else None, + 
transform=UM2N.normalise if normalise else None, x_feature=x_feat, mesh_feature=mesh_feat, conv_feature=conv_feat, @@ -110,9 +110,9 @@ ) print(data_dir) -data_set = wm.MeshDataset( +data_set = UM2N.MeshDataset( data_dir, - transform=wm.normalise if normalise else None, + transform=UM2N.normalise if normalise else None, x_feature=x_feat, mesh_feature=mesh_feat, conv_feature=conv_feat, diff --git a/test_finite_difference.py b/test_finite_difference.py index b5795af..335369f 100644 --- a/test_finite_difference.py +++ b/test_finite_difference.py @@ -6,7 +6,7 @@ import torch.nn as nn from torch_geometric.data import DataLoader -from warpmesh.loader import AggreateDataset, MeshDataset, normalise +from UM2N.loader import AggreateDataset, MeshDataset, normalise def interpolate(u, ori_mesh_x, ori_mesh_y, moved_x, moved_y): diff --git a/test_flow_past_cylinder_demo.py b/test_flow_past_cylinder_demo.py index fb94b75..d9498f7 100644 --- a/test_flow_past_cylinder_demo.py +++ b/test_flow_past_cylinder_demo.py @@ -10,7 +10,7 @@ import torch import yaml -import warpmesh as wm +import UM2N from inference_utils import InputPack, find_bd, find_edges, get_conv_feat print("Setting up solver.") @@ -32,13 +32,13 @@ # print("# Evaluation Pipeline Started\n") print(config) -model = wm.M2N_T( +model = UM2N.M2N_T( deform_in_c=config.num_deform_in, gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, ) model_file_path = "./pretrain_model/model_999.pth" -model = wm.load_model(model, model_file_path) +model = UM2N.load_model(model, model_file_path) # model = load_model(run, config, epoch, "output_sim") model.eval() model = model.to(device) diff --git a/test_flow_past_cylinder_demo_ale.py b/test_flow_past_cylinder_demo_ale.py index 667076e..ef4bcb3 100644 --- a/test_flow_past_cylinder_demo_ale.py +++ b/test_flow_past_cylinder_demo_ale.py @@ -10,7 +10,7 @@ import torch import yaml -import warpmesh as wm +import UM2N from inference_utils import InputPack, find_bd, find_edges, get_conv_feat print("Setting up solver.") @@ -32,13 +32,13 @@ # print("# Evaluation Pipeline Started\n") print(config) -model = wm.M2N_T( +model = UM2N.M2N_T( deform_in_c=config.num_deform_in, gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, ) model_file_path = "./pretrain_model/model_999.pth" -model = wm.load_model(model, model_file_path) +model = UM2N.load_model(model, model_file_path) # model = load_model(run, config, epoch, "output_sim") model.eval() model = model.to(device) diff --git a/test_flow_past_plate_demo.py b/test_flow_past_plate_demo.py index 093a31f..58e78b1 100644 --- a/test_flow_past_plate_demo.py +++ b/test_flow_past_plate_demo.py @@ -10,7 +10,7 @@ import torch import yaml -import warpmesh as wm +import UM2N from inference_utils import InputPack, find_bd, find_edges, get_conv_feat print("Setting up solver.") @@ -32,13 +32,13 @@ # print("# Evaluation Pipeline Started\n") print(config) -model = wm.M2N_T( +model = UM2N.M2N_T( deform_in_c=config.num_deform_in, gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, ) model_file_path = "./pretrain_model/model_999.pth" -model = wm.load_model(model, model_file_path) +model = UM2N.load_model(model, model_file_path) # model = load_model(run, config, epoch, "output_sim") model.eval() model = model.to(device) diff --git a/test_helm_demo.py b/test_helm_demo.py index 98977a2..4b38af0 100644 --- a/test_helm_demo.py +++ b/test_helm_demo.py @@ -7,7 +7,7 @@ import torch import yaml -import warpmesh as wm +import UM2N from inference_utils import InputPack, find_bd, find_edges, get_conv_feat 
print("Setting up solver.") @@ -29,13 +29,13 @@ # print("# Evaluation Pipeline Started\n") print(config) -model = wm.M2N_T( +model = UM2N.M2N_T( deform_in_c=config.num_deform_in, gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, ) model_file_path = "./pretrain_model/model_999.pth" -model = wm.load_model(model, model_file_path) +model = UM2N.load_model(model, model_file_path) # model = load_model(run, config, epoch, "output_sim") model.eval() model = model.to(device) diff --git a/test_phi_grad.py b/test_phi_grad.py index 847baae..e6afd3d 100644 --- a/test_phi_grad.py +++ b/test_phi_grad.py @@ -9,8 +9,8 @@ from torch.utils.data import SequentialSampler from torch_geometric.data import DataLoader -import warpmesh as wm -from warpmesh.model.train_util import ( +import UM2N +from UM2N.model.train_util import ( generate_samples_structured_grid, model_forward, ) @@ -233,7 +233,7 @@ model = None if config.model_used == "MRTransformer": - model = wm.MRTransformer( + model = UM2N.MRTransformer( num_transformer_in=config.num_transformer_in, num_transformer_out=config.num_transformer_out, num_transformer_embed_dim=config.num_transformer_embed_dim, @@ -252,9 +252,9 @@ else: config.mesh_feat.extend(["phi", "grad_phi", "jacobian"]) print("mesh feat type ", config.mesh_feat) - test_set = wm.MeshDataset( + test_set = UM2N.MeshDataset( test_dir, - # transform=wm.normalise if wm.normalise else None, + # transform=UM2N.normalise if UM2N.normalise else None, transform=None, x_feature=config.x_feat, mesh_feature=config.mesh_feat, @@ -291,7 +291,7 @@ target_file_name = model_file assert model_file is not None, "Model file not found either on wandb or local." print(target_file_name) - model = wm.load_model(model, model_file) + model = UM2N.load_model(model, model_file) print(model) loss_func = torch.nn.L1Loss() @@ -500,7 +500,7 @@ target_mesh.append(sample.y.detach().cpu().numpy()) target_face.append(sample.face.detach().cpu().numpy()) target_hessian_norm.append(sample.mesh_feat[:, -1].detach().cpu().numpy()) - # compare_fig = wm.plot_mesh_compare( + # compare_fig = UM2N.plot_mesh_compare( # out.detach().cpu().numpy(), sample.y, # sample.face # ) @@ -510,7 +510,7 @@ break -# compare_fig = wm.plot_multiple_mesh_compare(out_mesh_collections, out_loss_collections, target_mesh, target_face) +# compare_fig = UM2N.plot_multiple_mesh_compare(out_mesh_collections, out_loss_collections, target_mesh, target_face) # compare_fig.tight_layout() # compare_fig.subplots_adjust(top=0.95) # compare_fig.suptitle(f"{dataset_name}: Output Mesh Comparsion (mesh resolution {test_ms}, dataloder seed: {random_seed})", fontsize=24) @@ -578,7 +578,7 @@ num_variables = len(variables_collections.keys()) font_size = 24 -mesh_gen = wm.UnstructuredSquareMesh() +mesh_gen = UM2N.UnstructuredSquareMesh() if dataset_name == "helmholtz": model_mesh = mesh_gen.load_mesh( diff --git a/test_ring_demo_perf.py b/test_ring_demo_perf.py index 35c3736..49ee937 100644 --- a/test_ring_demo_perf.py +++ b/test_ring_demo_perf.py @@ -6,7 +6,7 @@ import torch import yaml import pickle -import warpmesh as wm +import UM2N print("Setting up solver.") @@ -49,13 +49,13 @@ def dump_gpu_usage_to_file(filename): print(config) -model = wm.M2N_T( +model = UM2N.M2N_T( deform_in_c=config.num_deform_in, gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, ) model_file_path = "./pretrain_model/model_999.pth" -model = wm.load_model(model, model_file_path) +model = UM2N.load_model(model, model_file_path) model.eval() model = model.to(device) 
########################################################### diff --git a/test_sampling.py b/test_sampling.py index c4e564f..bdd936b 100644 --- a/test_sampling.py +++ b/test_sampling.py @@ -7,7 +7,7 @@ from torch_geometric.data import DataLoader from torch_geometric.nn import knn_graph -from warpmesh.loader import AggreateDataset, MeshDataset, normalise +from UM2N.loader import AggreateDataset, MeshDataset, normalise def interpolate(u, ori_mesh_x, ori_mesh_y, moved_x, moved_y): diff --git a/tests/dataset_integrity_check.ipynb b/tests/dataset_integrity_check.ipynb index eb9aa1d..b719586 100644 --- a/tests/dataset_integrity_check.ipynb +++ b/tests/dataset_integrity_check.ipynb @@ -9,7 +9,7 @@ "%load_ext autoreload\n", "%autoreload 2\n", "\n", - "import warpmesh as wm\n", + "import UM2N\n", "# import glob\n", "import torch\n", "from torch_geometric.utils import index_to_mask\n", @@ -202,7 +202,7 @@ ], "source": [ "def check_phi_attached(data_path):\n", - " data_set = wm.MeshDataset(data_path)\n", + " data_set = UM2N.MeshDataset(data_path)\n", " for i in range(len(data_set)):\n", " try:\n", " assert hasattr(data_set[i], 'phi')\n", @@ -287,14 +287,14 @@ " # file_pattern = os.path.join(data_path, 'data_*.npy')\n", " # files = glob.glob(file_pattern)\n", " # for file in files:\n", - " data_set = wm.MeshDataset(data_path)\n", + " data_set = UM2N.MeshDataset(data_path)\n", " for i in range(len(data_set)):\n", " coords = data_set[i].x[:, :2]\n", " num_nodes = coords.shape[0]\n", " source_mask = index_to_mask(\n", " torch.tensor([source_idx]), num_nodes\n", " )\n", - " nei = wm.get_neighbors(source_mask, data_set[i].edge_index)\n", + " nei = UM2N.get_neighbors(source_mask, data_set[i].edge_index)\n", " if (nei.sum() == 6):\n", " pass\n", " else:\n", diff --git a/tests/play_conv_feat.py b/tests/play_conv_feat.py index 59de251..8d56e7e 100644 --- a/tests/play_conv_feat.py +++ b/tests/play_conv_feat.py @@ -2,7 +2,7 @@ import firedrake as fd -import warpmesh as wm +import UM2N os.environ["OMP_NUM_THREADS"] = "1" @@ -16,7 +16,7 @@ z_min = 0 mesh = fd.RectangleMesh(num_grid_x, num_grid_y, scale_x, scale_y) -helmholtz_eq = wm.RandomHelmholtzGenerator( +helmholtz_eq = UM2N.RandomHelmholtzGenerator( dist_params={ "max_dist": max_dist, "n_dist": n_dist, @@ -31,7 +31,7 @@ res = helmholtz_eq.discretise(mesh) # discretise the equation -solver = wm.HelmholtzSolver( +solver = UM2N.HelmholtzSolver( params={ "function_space": res["function_space"], "LHS": res["LHS"], @@ -40,7 +40,7 @@ } ) uh = solver.solve_eq() -hessian = wm.MeshGenerator( +hessian = UM2N.MeshGenerator( params={ "num_grid_x": num_grid_x, "num_grid_y": num_grid_y, @@ -49,7 +49,7 @@ } ).get_hessian(mesh) -hessian_norm = wm.MeshGenerator( +hessian_norm = UM2N.MeshGenerator( params={ "num_grid_x": num_grid_x, "num_grid_y": num_grid_y, @@ -62,7 +62,7 @@ func_vec_space = fd.VectorFunctionSpace(mesh, "CG", 1) grad_uh_interpolate = fd.interpolate(fd.grad(uh), func_vec_space) -mesh_gen = wm.MeshGenerator( +mesh_gen = UM2N.MeshGenerator( params={ "num_grid_x": num_grid_x, "num_grid_y": num_grid_y, @@ -75,7 +75,7 @@ # solve the equation on the new mesh new_res = helmholtz_eq.discretise(new_mesh) -new_solver = wm.HelmholtzSolver( +new_solver = UM2N.HelmholtzSolver( params={ "function_space": new_res["function_space"], "LHS": new_res["LHS"], @@ -86,7 +86,7 @@ uh_new = new_solver.solve_eq() # process the data for training -mesh_processor = wm.MeshProcessor( +mesh_processor = UM2N.MeshProcessor( original_mesh=mesh, optimal_mesh=new_mesh, 
function_space=new_res["function_space"], diff --git a/tests/play_conv_feat_and_fix.py b/tests/play_conv_feat_and_fix.py index f75efc3..fa5dd3d 100644 --- a/tests/play_conv_feat_and_fix.py +++ b/tests/play_conv_feat_and_fix.py @@ -9,7 +9,7 @@ import matplotlib.pyplot as plt import torch -import warpmesh as wm +import UM2N warnings.filterwarnings("ignore") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") @@ -31,11 +31,11 @@ "conv_hessian_norm", ] -data_set = wm.MeshDataset( +data_set = UM2N.MeshDataset( test_data_dir, conv_feature_fix=conv_feat_fix, conv_feature=conv_feat, - transform=wm.normalise, + transform=UM2N.normalise, ) sample = data_set[idx] diff --git a/tests/play_dataset.py b/tests/play_dataset.py index 42af885..8933b4b 100644 --- a/tests/play_dataset.py +++ b/tests/play_dataset.py @@ -3,7 +3,7 @@ import torch -import warpmesh as wm +import UM2N warnings.filterwarnings("ignore") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") @@ -12,9 +12,9 @@ data_set_path = os.path.join(project_dir, "data/") data_path = data_set_path -data_set = wm.MeshDataset( +data_set = UM2N.MeshDataset( os.path.join(data_path, "test"), - transform=wm.normalise, + transform=UM2N.normalise, ) diff --git a/tests/play_eval.py b/tests/play_eval.py index bca821b..1dee468 100644 --- a/tests/play_eval.py +++ b/tests/play_eval.py @@ -3,7 +3,7 @@ import torch from torch_geometric.data import DataLoader -import warpmesh as wm +import UM2N warnings.filterwarnings("ignore") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # noqa @@ -21,9 +21,9 @@ prediction_dir = "/Users/cw1722/Documents/irp/irp-cw1722/data/temp" -model = wm.MRN(deform_in_c=7, gfe_in_c=2, lfe_in_c=4, num_loop=3).to(device) +model = UM2N.MRN(deform_in_c=7, gfe_in_c=2, lfe_in_c=4, num_loop=3).to(device) -model = wm.load_model(model, weight_path) +model = UM2N.load_model(model, weight_path) n_elem_x = n_elem_y = 20 @@ -50,9 +50,9 @@ "conv_hessian_norm", ] -data_set = wm.MeshDataset( +data_set = UM2N.MeshDataset( data_dir, - transform=wm.normalise if normalise else None, + transform=UM2N.normalise if normalise else None, x_feature=x_feat, mesh_feature=mesh_feat, conv_feature=conv_feat, @@ -60,5 +60,5 @@ loader = DataLoader(data_set, batch_size=batch_size) -loss = wm.evaluate(loader, model, device, loss_func) +loss = UM2N.evaluate(loader, model, device, loss_func) print(loss) diff --git a/tests/play_loader.py b/tests/play_loader.py index a5fa684..63a51b1 100644 --- a/tests/play_loader.py +++ b/tests/play_loader.py @@ -4,7 +4,7 @@ import torch from torch_geometric.data import DataLoader -import warpmesh as wm +import UM2N warnings.filterwarnings("ignore") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") @@ -16,9 +16,9 @@ ) data_path = data_set_path -data_set = wm.MeshDataset( +data_set = UM2N.MeshDataset( os.path.join(data_path, "train"), - transform=wm.normalise, + transform=UM2N.normalise, ) loader = DataLoader(data_set, batch_size=10, shuffle=False) diff --git a/tests/play_model.py b/tests/play_model.py index b296d16..9699b73 100644 --- a/tests/play_model.py +++ b/tests/play_model.py @@ -4,13 +4,13 @@ import torch from torch_geometric.loader import DataLoader -import warpmesh as wm +import UM2N warnings.filterwarnings("ignore") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -model = wm.MRN(deform_in_c=3, num_loop=3) +model = UM2N.MRN(deform_in_c=3, num_loop=3) x_feat = [ "coord", @@ -36,9 +36,9 @@ data_set_path = os.path.join(project_dir, "data/") 
data_path = data_set_path -data_set = wm.MeshDataset( +data_set = UM2N.MeshDataset( os.path.join(data_path, "test"), - transform=wm.normalise if is_normalise else None, + transform=UM2N.normalise if is_normalise else None, x_feature=x_feat, mesh_feature=mesh_feat, conv_feature=conv_feat, diff --git a/tests/play_predict.py b/tests/play_predict.py index 070dd5c..2fea010 100644 --- a/tests/play_predict.py +++ b/tests/play_predict.py @@ -6,7 +6,7 @@ import movement as mv import torch -import warpmesh as wm +import UM2N warnings.filterwarnings("ignore") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") @@ -14,7 +14,7 @@ problem = "helmholtz" -model = wm.MRN(gfe_in_c=2, lfe_in_c=4, num_loop=10) +model = UM2N.MRN(gfe_in_c=2, lfe_in_c=4, num_loop=10) weight_path = "/Users/cw1722/Downloads/model_1099.pth" weight_decay = 5e-4 @@ -109,23 +109,23 @@ def plot_prediction(data_set, model, prediction_dir, mode): prediction_dir = "/Users/cw1722/Documents/irp/irp-cw1722/data/temp" -train_set = wm.MeshDataset( +train_set = UM2N.MeshDataset( os.path.join(data_path, "train"), - transform=wm.normalise if normalise else None, + transform=UM2N.normalise if normalise else None, x_feature=x_feat, mesh_feature=mesh_feat, conv_feature=conv_feat, ) -test_set = wm.MeshDataset( +test_set = UM2N.MeshDataset( os.path.join(data_path, "test"), - transform=wm.normalise if normalise else None, + transform=UM2N.normalise if normalise else None, x_feature=x_feat, mesh_feature=mesh_feat, conv_feature=conv_feat, ) -val_set = wm.MeshDataset( +val_set = UM2N.MeshDataset( os.path.join(data_path, "val"), - transform=wm.normalise if normalise else None, + transform=UM2N.normalise if normalise else None, x_feature=x_feat, mesh_feature=mesh_feat, conv_feature=conv_feat, diff --git a/tests/test_import.py b/tests/test_import.py index a98a7c5..a7e4c0d 100644 --- a/tests/test_import.py +++ b/tests/test_import.py @@ -5,10 +5,10 @@ @fixture(scope="module") -def warpmesh(): - import warpmesh +def UM2N(): + import UM2N - return warpmesh + return UM2N @fixture(scope="module") @@ -25,7 +25,7 @@ def movement(): return movement -def test_import(warpmesh, firedrake, movement): - assert warpmesh +def test_import(UM2N, firedrake, movement): + assert UM2N assert firedrake assert movement diff --git a/view_dataset_diff.py b/view_dataset_diff.py index 2e770b4..9bc6492 100644 --- a/view_dataset_diff.py +++ b/view_dataset_diff.py @@ -7,9 +7,9 @@ import torch from torch_geometric.data import DataLoader -import warpmesh as wm -from warpmesh.helper import load_yaml_to_namespace -from warpmesh.loader import AggreateDataset, MeshDataset, normalise +import UM2N +from UM2N.helper import load_yaml_to_namespace +from UM2N.loader import AggreateDataset, MeshDataset, normalise # parser = argparse.ArgumentParser( # prog="Warpmesh", description="warp the mesh", epilog="warp the mesh" @@ -130,7 +130,7 @@ def create_dataset(config, data_paths): ) -mesh_gen = wm.UnstructuredSquareMesh() +mesh_gen = UM2N.UnstructuredSquareMesh() mesh_old = mesh_gen.load_mesh( file_path=os.path.join(f"{data_root_old}/mesh", f"mesh{num_selected}.msh") diff --git a/vis_run.py b/vis_run.py index a2e8cc0..b543fc4 100644 --- a/vis_run.py +++ b/vis_run.py @@ -5,7 +5,7 @@ import wandb from torch_geometric.data import DataLoader -import warpmesh as wm +import UM2N warnings.filterwarnings("ignore") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") @@ -167,59 +167,59 @@ model = None if config.model_used == "M2N": - model = wm.M2N( + model = UM2N.M2N( 
deform_in_c=config.num_deform_in, gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, ) elif config.model_used == "M2NAtten": - model = wm.M2NAtten( + model = UM2N.M2NAtten( deform_in_c=config.num_deform_in, gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, ) elif config.model_used == "MRN": - model = wm.MRN( + model = UM2N.MRN( deform_in_c=config.num_deform_in, gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, num_loop=config.num_deformer_loop, ) elif config.model_used == "M2N_dynamic_drop": - model = wm.M2N_dynamic_drop( + model = UM2N.M2N_dynamic_drop( deform_in_c=config.num_deform_in, gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, ) elif config.model_used == "M2N_dynamic_no_drop": - model = wm.M2N_dynamic_no_drop( + model = UM2N.M2N_dynamic_no_drop( deform_in_c=config.num_deform_in, gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, ) elif config.model_used == "MRNAtten": - model = wm.MRNAtten( + model = UM2N.MRNAtten( deform_in_c=config.num_deform_in, gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, num_loop=config.num_deformer_loop, ) elif config.model_used == "MRNGlobalTransformerEncoder": - model = wm.MRNGlobalTransformerEncoder( + model = UM2N.MRNGlobalTransformerEncoder( deform_in_c=config.num_deform_in, gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, num_loop=config.num_deformer_loop, ) elif config.model_used == "MRNLocalTransformerEncoder": - model = wm.MRNLocalTransformerEncoder( + model = UM2N.MRNLocalTransformerEncoder( deform_in_c=config.num_deform_in, gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, num_loop=config.num_deformer_loop, ) elif config.model_used == "MRTransformer": - model = wm.MRTransformer( + model = UM2N.MRTransformer( num_transformer_in=config.num_transformer_in, num_transformer_out=config.num_transformer_out, num_transformer_embed_dim=config.num_transformer_embed_dim, @@ -231,7 +231,7 @@ device=device, ) elif config.model_used == "M2Transformer": - model = wm.M2Transformer( + model = UM2N.M2Transformer( deform_in_c=config.num_deform_in, gfe_in_c=config.num_gfe_in, lfe_in_c=config.num_lfe_in, @@ -239,9 +239,9 @@ else: raise Exception(f"Model {config.model_used} not implemented.") - test_set = wm.MeshDataset( + test_set = UM2N.MeshDataset( test_dir, - transform=wm.normalise if wm.normalise else None, + transform=UM2N.normalise if UM2N.normalise else None, x_feature=config.x_feat, mesh_feature=config.mesh_feat, conv_feature=config.conv_feat, @@ -274,7 +274,7 @@ target_file_name = model_file assert model_file is not None, "Model file not found either on wandb or local." 
print(target_file_name) - model = wm.load_model(model, model_file) + model = UM2N.load_model(model, model_file) print(model) loss_func = torch.nn.L1Loss() @@ -288,7 +288,7 @@ # out = model(sample) # print(f"{i} loss: {loss_func(out, sample.y)*1000}") - # compare_fig = wm.plot_mesh_compare( + # compare_fig = UM2N.plot_mesh_compare( # out.detach().cpu().numpy(), sample.y, # sample.face # ) @@ -350,7 +350,7 @@ target_mesh.append(sample.y.detach().cpu().numpy()) target_face.append(sample.face.detach().cpu().numpy()) target_hessian_norm.append(sample.mesh_feat[:, -1].detach().cpu().numpy()) - # compare_fig = wm.plot_mesh_compare( + # compare_fig = UM2N.plot_mesh_compare( # out.detach().cpu().numpy(), sample.y, # sample.face # ) @@ -360,7 +360,7 @@ break -compare_fig = wm.plot_multiple_mesh_compare( +compare_fig = UM2N.plot_multiple_mesh_compare( out_mesh_collections, out_loss_collections, target_mesh, target_face ) compare_fig.tight_layout() @@ -377,13 +377,13 @@ # selected_node = torch.randint(low=0, high=test_ms*test_ms-1, size=(1,)) # selected_node = 888 # print(f"attention map selected node: {selected_node}") -# atten_fig = wm.plot_attentions_map_compare(out_mesh_collections, out_loss_collections, out_atten_collections, target_hessian_norm, target_mesh, target_face, selected_node=selected_node) +# atten_fig = UM2N.plot_attentions_map_compare(out_mesh_collections, out_loss_collections, out_atten_collections, target_hessian_norm, target_mesh, target_face, selected_node=selected_node) # atten_fig.tight_layout() # atten_fig.subplots_adjust(top=0.95) # atten_fig.suptitle(f"Ouput Attention (mesh resolution {test_ms}, dataloder seed: {random_seed})", fontsize=24) # atten_fig.savefig(f"./out_images/attention_reso_{test_ms}_seed_{random_seed}_selected_node_{selected_node}.png") -# atten_fig = wm.plot_attentions_map(out_atten_collections, out_loss_collections) +# atten_fig = UM2N.plot_attentions_map(out_atten_collections, out_loss_collections) # atten_fig.tight_layout() # atten_fig.subplots_adjust(top=0.95) # atten_fig.suptitle(f"Ouput Attention (mesh resolution {test_ms}, dataloder seed: {random_seed})", fontsize=24)
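Migration aid (hypothetical, not part of the patch above): any downstream script that still does `import warpmesh as wm` breaks once this rename lands. Until all callers are ported, a one-file shim on the import path can re-register the renamed package under its legacy name; the file name and the approach below are assumptions, not something this PR ships.

# warpmesh.py -- hypothetical compatibility shim, not included in this patch.
# Makes legacy `import warpmesh as wm` resolve to the renamed UM2N package.
import sys

import UM2N

# Swap this placeholder module for the real package in the import cache;
# the in-flight `import warpmesh` and every later one then return UM2N.
sys.modules[__name__] = UM2N

With that file on the PYTHONPATH, `import warpmesh as wm; wm.load_model(...)` behaves the same as `UM2N.load_model(...)`, so unported scripts keep running while they are migrated one at a time.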