From f65112f8c79cec9cfd1caaac623b698b648daee5 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Fri, 27 Oct 2023 10:45:36 +0100 Subject: [PATCH 01/76] Tabulate Dubiner basis using Duffy transform --- FIAT/expansions.py | 206 +++++++++++++++++++++++++++++++++++++++++++++ FIAT/jacobi.py | 2 +- 2 files changed, 207 insertions(+), 1 deletion(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index a5fc112ac..6b9348c26 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -432,3 +432,209 @@ def polynomial_dimension(ref_el, degree): return max(0, (degree + 1) * (degree + 2) * (degree + 3) // 6) else: raise ValueError("Unknown reference element type.") + + +def eta_square(xi): + """Maps from the (-1,1) reference triangle to [-1,1]^2.""" + xi1, xi2 = xi + with numpy.errstate(divide='ignore', invalid='ignore'): + eta1 = 2. * (1. + xi1) / (1. - xi2) - 1. + eta2 = xi2 + if eta1.dtype != object: + eta1[numpy.logical_not(numpy.isfinite(eta1))] = 1. + return eta1, eta2 + + +def eta_cube(xi): + """Maps from the (-1,1) reference tetrahedron to [-1,1]^3.""" + xi1, xi2, xi3 = xi + with numpy.errstate(divide='ignore', invalid='ignore'): + eta1 = 2. * (1. + xi1) / (-xi2 - xi3) - 1. + eta2 = 2. * (1. + xi2) / (1. - xi3) - 1. + eta3 = xi3 + if eta1.dtype != object: + eta1[numpy.logical_not(numpy.isfinite(eta1))] = 1. + if eta2.dtype != object: + eta2[numpy.logical_not(numpy.isfinite(eta2))] = 1. + return eta1, eta2, eta3 + + +from operator import mul +from functools import reduce +from math import prod + + +def chain_rule(eta, dphi_dxi): + dim = len(eta) + Jii = 1. + dphi_deta = dphi_dxi + for i in reversed(range(dim)): + iupper = range(i + 1, dim) + offdiag = (prod(((1. - eta[k])*0.5 for k in iupper if k != j), (1. + eta[i])*0.5) for j in iupper) + dphi_deta[i] = sum(reduce(mul, dphi_dxi[i+1:], offdiag), dphi_deta[i]) * (1./Jii) + Jii *= (1. - eta[i])*0.5 + return dphi_deta + + +def flat_index(i, j): + return (i + j) * (i + j + 1) // 2 + j + + +def dubiner_1d(order, dim, x): + if dim == 0: + return jacobi.eval_jacobi_batch(0, 0, degree, x[:, None]) + sd = (order + 1) * (order + 2) // 2 + phi = numpy.zeros((sd, x.size), dtype=x.dtype) + xhat = (1. - x) * 0.5 + for j in range(order+1): + n = order - j + alpha = 2 * j + dim + results = jacobi.eval_jacobi_batch(alpha, 0, n, x[:, None]) + if j > 0: + results *= xhat ** j + s = [flat_index(i, j) for i in range(n + 1)] + phi[s, :] = results + return phi + + +def dubiner_deriv_1d(order, dim, x): + if dim == 0: + return jacobi.eval_jacobi_deriv_batch(0, 0, degree, x[:, None]) + sd = (order + 1) * (order + 2) // 2 + dphi = numpy.zeros((sd, x.size), dtype=x.dtype) + xhat = (1. 
- x) * 0.5 + for j in range(order): + n = order - j + alpha = 2 * j + dim + derivs = jacobi.eval_jacobi_deriv_batch(alpha, 0, n, x[:, None]) + if j > 0: + results = jacobi.eval_jacobi_batch(alpha, 0, n, x[:, None]) + derivs *= xhat + derivs -= results * (0.5*j) + if j > 1: + derivs *= xhat ** (j - 1) + s = [flat_index(i, j) for i in range(n + 1)] + dphi[s, :] = derivs + return dphi + + +def dubiner_2d(order, xi, alphas=None): + if alphas is None: + alphas = [(0,) * 2] + sd = (order + 1) * (order + 2) // 2 + eta = eta_square(numpy.transpose(xi)) + B = [dubiner_1d(order, k, x) for k, x in enumerate(eta)] + D = [None] * len(B) + if any(sum(alpha) > 0 for alpha in alphas): + D = [dubiner_deriv_1d(order, k, x) for k, x in enumerate(eta)] + def idx(p, q): + return (p + q) * (p + q + 1) // 2 + q + + tabulations = {} + for alpha in alphas: + T = [Dj if aj else Bj for aj, Bj, Dj in zip(alpha, B, D)] + phi = numpy.zeros((sd, T[0].shape[1]), dtype=T[0].dtype) + for i in range(order + 1): + Ti = T[0][i] + for j in range(order + 1 - i): + scale = ((i + 0.5) * (i + j + 1.0)) ** 0.5 + phi[idx(i, j)] = T[1][flat_index(j, i)] * Ti * scale + tabulations[alpha] = phi + return tabulations + + +def dubiner_3d(order, xi, alphas=None): + if alphas is None: + alphas = [(0,) * 3] + sd = (order + 1) * (order + 2) * (order + 3) // 6 + eta = eta_cube(numpy.transpose(xi)) + B = [dubiner_1d(order, k, x) for k, x in enumerate(eta)] + D = [None] * len(B) + if any(sum(alpha) > 0 for alpha in alphas): + D = [dubiner_deriv_1d(order, k, x) for k, x in enumerate(eta)] + def idx(p, q, r): + return (p + q + r)*(p + q + r + 1)*(p + q + r + 2)//6 + (q + r)*(q + r + 1)//2 + r + + tabulations = {} + for alpha in alphas: + T = [Dj if aj else Bj for aj, Bj, Dj in zip(alpha, B, D)] + phi = numpy.zeros((sd, T[0].shape[1]), dtype=T[0].dtype) + for i in range(order + 1): + Ti = T[0][i] + for j in range(order + 1 - i): + Tij = T[1][flat_index(j, i)] * Ti + for k in range(order + 1 - i - j): + scale = ((i + 0.5) * (i + j + 1.0) * (i + j + k + 1.5)) ** 0.5 + phi[idx(i, j, k)] = T[2][flat_index(k, i + j)] * Tij * scale + tabulations[alpha] = phi + return tabulations + + +if __name__ == "__main__": + def symmetric_simplex(dim): + s = reference_element.ufc_simplex(dim) + r = lambda x: x ** 0.5 + if dim == 2: + s.vertices = [(0.0, 0.0), (-1.0, -r(3.0)), (1.0, -r(3.0))] + elif dim == 3: + s.vertices = [(r(3.0)/3, 0.0, 0.0), (-r(3.0)/6, 0.5, 0.0), + (-r(3.0)/6, -0.5, 0.0), (0.0, 0.0, r(6.0)/3)] + return s + + dim = 2 + degree = 2 + tabulate = [lambda n, x: jacobi.eval_jacobi_batch(0, 0, n, x), dubiner_2d, dubiner_3d][dim-1] + + # ref_el = symmetric_simplex(dim) + ref_el = reference_element.ufc_simplex(dim) + expansion_set = get_expansion_set(ref_el) + + if dim == 1: + base_ref_el = reference_element.DefaultInterval() + elif dim == 2: + base_ref_el = reference_element.DefaultTriangle() + elif dim == 3: + base_ref_el = reference_element.DefaultTetrahedron() + + v1 = ref_el.get_vertices() + v2 = base_ref_el.get_vertices() + A, b = reference_element.make_affine_mapping(v1, v2) + mapping = lambda x: numpy.dot(x, A.T) + b + + if 1: + alphas = [(0,) * dim] + alphas.extend(tuple(row) for row in numpy.eye(dim)) + simplify = lambda x: numpy.array(sympy.simplify(x)) + X = [tuple(map(sympy.Symbol, ("x", "y", "z")[:dim]))] + Tnew = tabulate(degree, mapping(X), alphas=alphas) + Told = expansion_set.tabulate(degree, X) + print("New") + print(simplify(Tnew[(0,) * dim])) + print("Old") + print(simplify(Told)) + + dz = tabulate(degree, mapping(X), alphas=alphas) + 
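# Aside: the symbolic check above can be slow; a minimal numerical sketch of
# the same comparison, reusing the names already defined in this __main__
# block (ref_el, degree, dim, mapping, tabulate, expansion_set) and assuming
# dim is 2 or 3.  The difference is printed rather than asserted in case the
# two code paths use different normalizations.
import numpy
rng = numpy.random.RandomState(0)
bary = rng.dirichlet(numpy.ones(dim + 1), size=10)         # strictly interior points,
pts = numpy.dot(bary, numpy.array(ref_el.get_vertices()))  # away from the collapsed vertex
Tref = numpy.array(expansion_set.tabulate(degree, pts))    # recurrence-based values
Tduf = tabulate(degree, mapping(pts))[(0,) * dim]          # Duffy-based values
print("max |duffy - recurrence|:", numpy.abs(Tduf - Tref).max())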
dy = expansion_set.tabulate_derivatives(degree, X) + for alpha, Xi in zip(alphas, X): + print("New") + print(simplify(Tnew[alpha])) + print("Old") + print(simplify(sympy.diff(Told, Xi))) + + else: + import FIAT + from matplotlib import pyplot as plt + + line = reference_element.ufc_simplex(1) + lr = FIAT.quadrature.GaussLobattoLegendreQuadratureLineRule + point_set = FIAT.recursive_points.RecursivePointSet(lambda n: lr(line, n+1).get_points() if n else None) + points = point_set.recursive_points(ref_el.get_vertices(), degree*5) + phi = tabulate(degree, mapping(points)) + z = phi[(0,) * dim] + y = expansion_set.tabulate(degree, points) + + x = numpy.array(points) + fig, ax = plt.subplots(subplot_kw={"projection": "3d"}) + ax.plot_trisurf(x[:, 0], x[:, 1], z[-1], linewidth=0.2, antialiased=True) + ax.plot_trisurf(x[:, 0], x[:, 1], y[-1], linewidth=0.2, antialiased=True) + plt.show() diff --git a/FIAT/jacobi.py b/FIAT/jacobi.py index ab78a1e3b..d167ca74f 100644 --- a/FIAT/jacobi.py +++ b/FIAT/jacobi.py @@ -90,7 +90,7 @@ def eval_jacobi_deriv_batch(a, b, n, xs): Returns a two-dimensional array of points, where the rows correspond to the Jacobi polynomials and the columns correspond to the points.""" - results = numpy.zeros((n + 1, len(xs)), "d") + results = numpy.zeros((n + 1, len(xs)), xs.dtype) if n == 0: return results else: From 45b4385d030a50a6c1cde9fe36c2b8c81f0d3e9c Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Fri, 27 Oct 2023 17:08:26 +0100 Subject: [PATCH 02/76] test derivatives --- FIAT/expansions.py | 83 ++++++++++++++++++++++++++++++---------------- 1 file changed, 54 insertions(+), 29 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 6b9348c26..7afb82781 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -464,16 +464,22 @@ def eta_cube(xi): from math import prod -def chain_rule(eta, dphi_dxi): +def chain_rule(eta, dphi_deta): dim = len(eta) - Jii = 1. - dphi_deta = dphi_dxi - for i in reversed(range(dim)): - iupper = range(i + 1, dim) - offdiag = (prod(((1. - eta[k])*0.5 for k in iupper if k != j), (1. + eta[i])*0.5) for j in iupper) - dphi_deta[i] = sum(reduce(mul, dphi_dxi[i+1:], offdiag), dphi_deta[i]) * (1./Jii) - Jii *= (1. - eta[i])*0.5 - return dphi_deta + + dphi_dxi = list(map(sympy.Symbol, ("fx", "fy", "fz")[:dim])) + for i in range(dim): + offdiag = [prod((1. - eta[k])*0.5 for k in range(j+1, dim) if k != i) * (1. + eta[j])*0.5 for j in range(i)] + dphi_dxi[i] += sum(reduce(mul, dphi_dxi[:i], offdiag)) + dphi_dxi[i] /= prod((1. - eta[k])*0.5 for k in range(i+1, dim)) + print(dphi_dxi) + + + dphi_dxi = [dphi_deta[alpha] for alpha in sorted(reversed(dphi_deta)) if sum(alpha) == 1] + for i in range(dim): + offdiag = [prod((1. - eta[k])*0.5 for k in range(j+1, dim) if k != i) * (1. + eta[j])*0.5 for j in range(i)] + dphi_dxi[i] += sum(reduce(mul, dphi_dxi[:i], offdiag)) + dphi_dxi[i] /= prod((1. 
- eta[k])*0.5 for k in range(i+1, dim)) def flat_index(i, j): @@ -492,8 +498,8 @@ def dubiner_1d(order, dim, x): results = jacobi.eval_jacobi_batch(alpha, 0, n, x[:, None]) if j > 0: results *= xhat ** j - s = [flat_index(i, j) for i in range(n + 1)] - phi[s, :] = results + indices = [flat_index(i, j) for i in range(n + 1)] + phi[indices, :] = results return phi @@ -510,11 +516,11 @@ def dubiner_deriv_1d(order, dim, x): if j > 0: results = jacobi.eval_jacobi_batch(alpha, 0, n, x[:, None]) derivs *= xhat - derivs -= results * (0.5*j) + derivs += results * (-0.5*j) if j > 1: derivs *= xhat ** (j - 1) - s = [flat_index(i, j) for i in range(n + 1)] - dphi[s, :] = derivs + indices = [flat_index(i, j) for i in range(n + 1)] + dphi[indices, :] = derivs return dphi @@ -523,16 +529,17 @@ def dubiner_2d(order, xi, alphas=None): alphas = [(0,) * 2] sd = (order + 1) * (order + 2) // 2 eta = eta_square(numpy.transpose(xi)) - B = [dubiner_1d(order, k, x) for k, x in enumerate(eta)] + B = [dubiner_1d(order, k, eta_k) for k, eta_k in enumerate(eta)] D = [None] * len(B) if any(sum(alpha) > 0 for alpha in alphas): - D = [dubiner_deriv_1d(order, k, x) for k, x in enumerate(eta)] + D = [dubiner_deriv_1d(order, k, eta_k) for k, eta_k in enumerate(eta)] + def idx(p, q): return (p + q) * (p + q + 1) // 2 + q tabulations = {} for alpha in alphas: - T = [Dj if aj else Bj for aj, Bj, Dj in zip(alpha, B, D)] + T = [Bj if aj == 0 else Dj for aj, Bj, Dj in zip(alpha, B, D)] phi = numpy.zeros((sd, T[0].shape[1]), dtype=T[0].dtype) for i in range(order + 1): Ti = T[0][i] @@ -540,6 +547,11 @@ def idx(p, q): scale = ((i + 0.5) * (i + j + 1.0)) ** 0.5 phi[idx(i, j)] = T[1][flat_index(j, i)] * Ti * scale tabulations[alpha] = phi + + print(tabulations[(1,0)]) + if len([alpha for alpha in alphas if sum(alpha) == 1]) == len(eta): + chain_rule(eta, tabulations) + print(tabulations[(1,0)]) return tabulations @@ -567,6 +579,10 @@ def idx(p, q, r): scale = ((i + 0.5) * (i + j + 1.0) * (i + j + k + 1.5)) ** 0.5 phi[idx(i, j, k)] = T[2][flat_index(k, i + j)] * Tij * scale tabulations[alpha] = phi + + gradients = [tabulations[alpha] for alpha in alphas if sum(alpha)] + if len(gradients) == len(eta): + chain_rule(eta, gradients) return tabulations @@ -587,7 +603,6 @@ def symmetric_simplex(dim): # ref_el = symmetric_simplex(dim) ref_el = reference_element.ufc_simplex(dim) - expansion_set = get_expansion_set(ref_el) if dim == 1: base_ref_el = reference_element.DefaultInterval() @@ -596,30 +611,40 @@ def symmetric_simplex(dim): elif dim == 3: base_ref_el = reference_element.DefaultTetrahedron() + ref_el = base_ref_el + v1 = ref_el.get_vertices() v2 = base_ref_el.get_vertices() A, b = reference_element.make_affine_mapping(v1, v2) mapping = lambda x: numpy.dot(x, A.T) + b + expansion_set = get_expansion_set(ref_el) if 1: + X = [tuple(map(sympy.Symbol, ("x", "y", "z")[:dim]))] + print("Dubiner flat") + print(dubiner_1d(degree, 1, numpy.array(X[0][:1]))) + print(dubiner_deriv_1d(degree, 1, numpy.array(X[0][:1]))) + + print("Affine mapping") + print(A) + print(b) alphas = [(0,) * dim] - alphas.extend(tuple(row) for row in numpy.eye(dim)) + alphas.extend(tuple(row) for row in numpy.eye(dim, dtype=int)) simplify = lambda x: numpy.array(sympy.simplify(x)) - X = [tuple(map(sympy.Symbol, ("x", "y", "z")[:dim]))] - Tnew = tabulate(degree, mapping(X), alphas=alphas) Told = expansion_set.tabulate(degree, X) - print("New") + Tnew = tabulate(degree, mapping(X), alphas=alphas) + + print("New phi(X)") print(simplify(Tnew[(0,) * dim])) - print("Old") + 
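# Aside: for reference, the dim = 2 case of what chain_rule above computes.
# On the default triangle the collapsed coordinates are
#     eta1 = 2*(1 + xi1)/(1 - xi2) - 1,   eta2 = xi2,
# so differentiating phi(eta1(xi), eta2(xi)) gives
#     dphi/dxi1 = 2/(1 - eta2) * dphi/deta1,
#     dphi/dxi2 = (1 + eta1)/(1 - eta2) * dphi/deta1 + dphi/deta2.
# In the loop, dividing by (1 - eta[k])/2 supplies the diagonal factor and the
# off-diagonal term adds (1 + eta[j])/2 times the already-converted dphi/dxi_j.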
print("Old phi(X)") print(simplify(Told)) - dz = tabulate(degree, mapping(X), alphas=alphas) - dy = expansion_set.tabulate_derivatives(degree, X) - for alpha, Xi in zip(alphas, X): - print("New") + for i, (alpha, Xi) in enumerate(zip(alphas[1:], X[0])): + print("New d/dX_%d phi" % i) print(simplify(Tnew[alpha])) - print("Old") - print(simplify(sympy.diff(Told, Xi))) + print("Old d/dX_%d phi" % i) + Di = lambda f: [sympy.simplify(sympy.diff(f[0], Xi))] + print(numpy.array(list(map(Di, Told)))) else: import FIAT From fb37c2286f059c8de9ef0f5e00210da7e08d9a0f Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Fri, 27 Oct 2023 23:53:06 +0100 Subject: [PATCH 03/76] Fixed some bugs --- FIAT/expansions.py | 64 +++++++++++++++------------------------------- 1 file changed, 20 insertions(+), 44 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 7afb82781..675b10869 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -459,26 +459,15 @@ def eta_cube(xi): return eta1, eta2, eta3 -from operator import mul -from functools import reduce from math import prod def chain_rule(eta, dphi_deta): dim = len(eta) - - dphi_dxi = list(map(sympy.Symbol, ("fx", "fy", "fz")[:dim])) - for i in range(dim): - offdiag = [prod((1. - eta[k])*0.5 for k in range(j+1, dim) if k != i) * (1. + eta[j])*0.5 for j in range(i)] - dphi_dxi[i] += sum(reduce(mul, dphi_dxi[:i], offdiag)) - dphi_dxi[i] /= prod((1. - eta[k])*0.5 for k in range(i+1, dim)) - print(dphi_dxi) - - - dphi_dxi = [dphi_deta[alpha] for alpha in sorted(reversed(dphi_deta)) if sum(alpha) == 1] + dphi_dxi = [dphi_deta[alpha] for alpha in reversed(sorted(dphi_deta)) if sum(alpha) == 1] for i in range(dim): - offdiag = [prod((1. - eta[k])*0.5 for k in range(j+1, dim) if k != i) * (1. + eta[j])*0.5 for j in range(i)] - dphi_dxi[i] += sum(reduce(mul, dphi_dxi[:i], offdiag)) + for j in range(i): + dphi_dxi[i] += dphi_dxi[j] * (1. + eta[j])*0.5 * prod((1. - eta[k])*0.5 for k in range(j+1, dim) if k != i) dphi_dxi[i] /= prod((1. - eta[k])*0.5 for k in range(i+1, dim)) @@ -509,7 +498,7 @@ def dubiner_deriv_1d(order, dim, x): sd = (order + 1) * (order + 2) // 2 dphi = numpy.zeros((sd, x.size), dtype=x.dtype) xhat = (1. 
- x) * 0.5 - for j in range(order): + for j in range(order+1): n = order - j alpha = 2 * j + dim derivs = jacobi.eval_jacobi_deriv_batch(alpha, 0, n, x[:, None]) @@ -519,24 +508,23 @@ def dubiner_deriv_1d(order, dim, x): derivs += results * (-0.5*j) if j > 1: derivs *= xhat ** (j - 1) + indices = [flat_index(i, j) for i in range(n + 1)] dphi[indices, :] = derivs return dphi -def dubiner_2d(order, xi, alphas=None): - if alphas is None: - alphas = [(0,) * 2] +def dubiner_2d(order, xi): sd = (order + 1) * (order + 2) // 2 eta = eta_square(numpy.transpose(xi)) B = [dubiner_1d(order, k, eta_k) for k, eta_k in enumerate(eta)] - D = [None] * len(B) - if any(sum(alpha) > 0 for alpha in alphas): - D = [dubiner_deriv_1d(order, k, eta_k) for k, eta_k in enumerate(eta)] - + D = [dubiner_deriv_1d(order, k, eta_k) for k, eta_k in enumerate(eta)] def idx(p, q): return (p + q) * (p + q + 1) // 2 + q + dim = len(eta) + alphas = [(0,) * dim] + alphas.extend(tuple(row) for row in numpy.eye(dim, dtype=int)) tabulations = {} for alpha in alphas: T = [Bj if aj == 0 else Dj for aj, Bj, Dj in zip(alpha, B, D)] @@ -548,25 +536,21 @@ def idx(p, q): phi[idx(i, j)] = T[1][flat_index(j, i)] * Ti * scale tabulations[alpha] = phi - print(tabulations[(1,0)]) - if len([alpha for alpha in alphas if sum(alpha) == 1]) == len(eta): - chain_rule(eta, tabulations) - print(tabulations[(1,0)]) + chain_rule(eta, tabulations) return tabulations -def dubiner_3d(order, xi, alphas=None): - if alphas is None: - alphas = [(0,) * 3] +def dubiner_3d(order, xi): sd = (order + 1) * (order + 2) * (order + 3) // 6 eta = eta_cube(numpy.transpose(xi)) B = [dubiner_1d(order, k, x) for k, x in enumerate(eta)] - D = [None] * len(B) - if any(sum(alpha) > 0 for alpha in alphas): - D = [dubiner_deriv_1d(order, k, x) for k, x in enumerate(eta)] + D = [dubiner_deriv_1d(order, k, x) for k, x in enumerate(eta)] def idx(p, q, r): return (p + q + r)*(p + q + r + 1)*(p + q + r + 2)//6 + (q + r)*(q + r + 1)//2 + r + dim = len(eta) + alphas = [(0,) * dim] + alphas.extend(tuple(row) for row in numpy.eye(dim, dtype=int)) tabulations = {} for alpha in alphas: T = [Dj if aj else Bj for aj, Bj, Dj in zip(alpha, B, D)] @@ -580,9 +564,7 @@ def idx(p, q, r): phi[idx(i, j, k)] = T[2][flat_index(k, i + j)] * Tij * scale tabulations[alpha] = phi - gradients = [tabulations[alpha] for alpha in alphas if sum(alpha)] - if len(gradients) == len(eta): - chain_rule(eta, gradients) + chain_rule(eta, tabulations) return tabulations @@ -621,27 +603,21 @@ def symmetric_simplex(dim): if 1: X = [tuple(map(sympy.Symbol, ("x", "y", "z")[:dim]))] - print("Dubiner flat") - print(dubiner_1d(degree, 1, numpy.array(X[0][:1]))) - print(dubiner_deriv_1d(degree, 1, numpy.array(X[0][:1]))) - print("Affine mapping") print(A) print(b) - alphas = [(0,) * dim] - alphas.extend(tuple(row) for row in numpy.eye(dim, dtype=int)) simplify = lambda x: numpy.array(sympy.simplify(x)) Told = expansion_set.tabulate(degree, X) - Tnew = tabulate(degree, mapping(X), alphas=alphas) + Tnew = tabulate(degree, mapping(X)) print("New phi(X)") print(simplify(Tnew[(0,) * dim])) print("Old phi(X)") print(simplify(Told)) - for i, (alpha, Xi) in enumerate(zip(alphas[1:], X[0])): + for i, (alpha, Xi) in enumerate(zip(numpy.eye(dim, dtype=int), X[0])): print("New d/dX_%d phi" % i) - print(simplify(Tnew[alpha])) + print(simplify(Tnew[tuple(alpha)])) print("Old d/dX_%d phi" % i) Di = lambda f: [sympy.simplify(sympy.diff(f[0], Xi))] print(numpy.array(list(map(Di, Told)))) From e6f29537ad822c754ef89be195fe2124471da39f Mon Sep 
17 00:00:00 2001 From: Pablo Brubeck Date: Sat, 28 Oct 2023 11:32:50 +0100 Subject: [PATCH 04/76] dmat without sympy --- FIAT/expansions.py | 348 +++++++++++++++++------------------------ FIAT/polynomial_set.py | 12 +- 2 files changed, 143 insertions(+), 217 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 675b10869..68ea647e1 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -12,6 +12,66 @@ import sympy from FIAT import reference_element from FIAT import jacobi +from math import prod + + +def flat_index(i, j): + return (i + j) * (i + j + 1) // 2 + j + + +def dubiner_1d(order, dim, x): + if dim == 0: + return jacobi.eval_jacobi_batch(0, 0, order, x[:, None]) + sd = (order + 1) * (order + 2) // 2 + phi = numpy.zeros((sd, x.size), dtype=x.dtype) + xhat = (1. - x) * 0.5 + for j in range(order+1): + n = order - j + alpha = 2 * j + dim + results = jacobi.eval_jacobi_batch(alpha, 0, n, x[:, None]) + if j > 0: + results *= xhat ** j + indices = [flat_index(i, j) for i in range(n + 1)] + phi[indices, :] = results + return phi + + +def dubiner_deriv_1d(order, dim, x): + if dim == 0: + return jacobi.eval_jacobi_deriv_batch(0, 0, order, x[:, None]) + sd = (order + 1) * (order + 2) // 2 + dphi = numpy.zeros((sd, x.size), dtype=x.dtype) + xhat = (1. - x) * 0.5 + for j in range(order+1): + n = order - j + alpha = 2 * j + dim + derivs = jacobi.eval_jacobi_deriv_batch(alpha, 0, n, x[:, None]) + if j > 0: + results = jacobi.eval_jacobi_batch(alpha, 0, n, x[:, None]) + derivs *= xhat + derivs += results * (-0.5*j) + if j > 1: + derivs *= xhat ** (j - 1) + + indices = [flat_index(i, j) for i in range(n + 1)] + dphi[indices, :] = derivs + return dphi + + +def duffy_chain_rule(A, eta, tabulations): + dphi_dxi = [tabulations[alpha] for alpha in sorted(tabulations, reverse=True) if sum(alpha) == 1] + dim = len(eta) + for i in range(dim): + for j in range(i): + dphi_dxi[i] += dphi_dxi[j] * (1. + eta[j])*0.5 * prod((1. - eta[k])*0.5 for k in range(j+1, dim) if k != i) + dphi_dxi[i] /= prod((1. - x)*0.5 for x in eta[i+1:]) + + k = 0 + dphi_dx = [sum(dphi_dxi[j] * A[j][i] for j in range(dim)) for i in range(dim)] + for alpha in sorted(tabulations, reverse=True): + if sum(alpha) == 1: + tabulations[alpha] = dphi_dx[k] + k += 1 def jrc(a, b, n): @@ -109,6 +169,31 @@ def xi_tetrahedron(eta): return xi1, xi2, xi3 +def eta_square(xi): + """Maps from the (-1,1) reference triangle to [-1,1]^2.""" + xi1, xi2 = xi + with numpy.errstate(divide='ignore', invalid='ignore'): + eta1 = 2. * (1. + xi1) / (1. - xi2) - 1. + eta2 = xi2 + if eta1.dtype != object: + eta1[numpy.logical_not(numpy.isfinite(eta1))] = 1. + return eta1, eta2 + + +def eta_cube(xi): + """Maps from the (-1,1) reference tetrahedron to [-1,1]^3.""" + xi1, xi2, xi3 = xi + with numpy.errstate(divide='ignore', invalid='ignore'): + eta1 = 2. * (1. + xi1) / (-xi2 - xi3) - 1. + eta2 = 2. * (1. + xi2) / (1. - xi3) - 1. + eta3 = xi3 + if eta1.dtype != object: + eta1[numpy.logical_not(numpy.isfinite(eta1))] = 1. + if eta2.dtype != object: + eta2[numpy.logical_not(numpy.isfinite(eta2))] = 1. 
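# Aside on eta_square/eta_cube: these are the collapsed-coordinate (Duffy)
# maps from the default simplex (vertex coordinates +-1) onto the cube
# [-1, 1]^d.  The quotient is 0/0 at the collapsed vertex (e.g. xi2 = 1 on
# the triangle, where 1 + xi1 also vanishes), so the numpy.errstate guard
# silences the warning and the isfinite fix-up pins the value to eta = 1
# there; the dtype != object test skips that fix-up for object arrays (such
# as sympy symbols), which numpy.isfinite cannot handle.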
+ return eta1, eta2, eta3 + + class PointExpansionSet(object): """Evaluates the point basis on a point reference element.""" @@ -272,7 +357,34 @@ def idx(p, q): return results # return self.scale * results + def _tabulate_duffy(self, n, pts): + def idx(p, q): + return (p + q) * (p + q + 1) // 2 + q + sd = self.get_num_members(n) + xi = numpy.transpose(numpy.dot(pts, self.A.T) + self.b) + eta = eta_square(xi) + B = [dubiner_1d(n, k, eta_k) for k, eta_k in enumerate(eta)] + D = [dubiner_deriv_1d(n, k, eta_k) for k, eta_k in enumerate(eta)] + dim = len(eta) + alphas = [(0,) * dim] + alphas.extend(tuple(row) for row in numpy.eye(dim, dtype=int)) + tabulations = {} + for alpha in alphas: + T = [Bj if aj == 0 else Dj for aj, Bj, Dj in zip(alpha, B, D)] + phi = numpy.zeros((sd, T[0].shape[1]), dtype=T[0].dtype) + for i in range(n + 1): + Ti = T[0][i] + for j in range(n + 1 - i): + scale = ((i + 0.5) * (i + j + 1.0)) ** 0.5 + phi[idx(i, j)] = T[1][flat_index(j, i)] * Ti * scale + tabulations[alpha] = phi + duffy_chain_rule(self.A, eta, tabulations) + return tabulations + def tabulate_derivatives(self, n, pts): + tabulations = self._tabulate_duffy(n, pts) + return [tabulations[tuple(key)] for key in numpy.eye(2, dtype=int)] + order = 1 data = _tabulate_dpts(self._tabulate, 2, n, order, numpy.array(pts)) # Put data in the required data structure, i.e., @@ -384,7 +496,36 @@ def idx(p, q, r): return results + def _tabulate_duffy(self, n, pts): + def idx(p, q, r): + return (p + q + r)*(p + q + r + 1)*(p + q + r + 2)//6 + (q + r)*(q + r + 1)//2 + r + sd = self.get_num_members(n) + xi = numpy.transpose(numpy.dot(pts, self.A.T) + self.b) + eta = eta_cube(xi) + B = [dubiner_1d(n, k, x) for k, x in enumerate(eta)] + D = [dubiner_deriv_1d(n, k, x) for k, x in enumerate(eta)] + dim = len(eta) + alphas = [(0,) * dim] + alphas.extend(tuple(row) for row in numpy.eye(dim, dtype=int)) + tabulations = {} + for alpha in alphas: + T = [Dj if aj else Bj for aj, Bj, Dj in zip(alpha, B, D)] + phi = numpy.zeros((sd, T[0].shape[1]), dtype=T[0].dtype) + for i in range(n + 1): + Ti = T[0][i] + for j in range(n + 1 - i): + Tij = T[1][flat_index(j, i)] * Ti + for k in range(n + 1 - i - j): + scale = ((i + 0.5) * (i + j + 1.0) * (i + j + k + 1.5)) ** 0.5 + phi[idx(i, j, k)] = T[2][flat_index(k, i + j)] * Tij * scale + tabulations[alpha] = phi + duffy_chain_rule(self.A, eta, tabulations) + return tabulations + def tabulate_derivatives(self, n, pts): + tabulations = self._tabulate_duffy(n, pts) + return [tabulations[tuple(key)] for key in numpy.eye(3, dtype=int)] + order = 1 D = 3 data = _tabulate_dpts(self._tabulate, D, n, order, numpy.array(pts)) @@ -432,210 +573,3 @@ def polynomial_dimension(ref_el, degree): return max(0, (degree + 1) * (degree + 2) * (degree + 3) // 6) else: raise ValueError("Unknown reference element type.") - - -def eta_square(xi): - """Maps from the (-1,1) reference triangle to [-1,1]^2.""" - xi1, xi2 = xi - with numpy.errstate(divide='ignore', invalid='ignore'): - eta1 = 2. * (1. + xi1) / (1. - xi2) - 1. - eta2 = xi2 - if eta1.dtype != object: - eta1[numpy.logical_not(numpy.isfinite(eta1))] = 1. - return eta1, eta2 - - -def eta_cube(xi): - """Maps from the (-1,1) reference tetrahedron to [-1,1]^3.""" - xi1, xi2, xi3 = xi - with numpy.errstate(divide='ignore', invalid='ignore'): - eta1 = 2. * (1. + xi1) / (-xi2 - xi3) - 1. - eta2 = 2. * (1. + xi2) / (1. - xi3) - 1. - eta3 = xi3 - if eta1.dtype != object: - eta1[numpy.logical_not(numpy.isfinite(eta1))] = 1. 
- if eta2.dtype != object: - eta2[numpy.logical_not(numpy.isfinite(eta2))] = 1. - return eta1, eta2, eta3 - - -from math import prod - - -def chain_rule(eta, dphi_deta): - dim = len(eta) - dphi_dxi = [dphi_deta[alpha] for alpha in reversed(sorted(dphi_deta)) if sum(alpha) == 1] - for i in range(dim): - for j in range(i): - dphi_dxi[i] += dphi_dxi[j] * (1. + eta[j])*0.5 * prod((1. - eta[k])*0.5 for k in range(j+1, dim) if k != i) - dphi_dxi[i] /= prod((1. - eta[k])*0.5 for k in range(i+1, dim)) - - -def flat_index(i, j): - return (i + j) * (i + j + 1) // 2 + j - - -def dubiner_1d(order, dim, x): - if dim == 0: - return jacobi.eval_jacobi_batch(0, 0, degree, x[:, None]) - sd = (order + 1) * (order + 2) // 2 - phi = numpy.zeros((sd, x.size), dtype=x.dtype) - xhat = (1. - x) * 0.5 - for j in range(order+1): - n = order - j - alpha = 2 * j + dim - results = jacobi.eval_jacobi_batch(alpha, 0, n, x[:, None]) - if j > 0: - results *= xhat ** j - indices = [flat_index(i, j) for i in range(n + 1)] - phi[indices, :] = results - return phi - - -def dubiner_deriv_1d(order, dim, x): - if dim == 0: - return jacobi.eval_jacobi_deriv_batch(0, 0, degree, x[:, None]) - sd = (order + 1) * (order + 2) // 2 - dphi = numpy.zeros((sd, x.size), dtype=x.dtype) - xhat = (1. - x) * 0.5 - for j in range(order+1): - n = order - j - alpha = 2 * j + dim - derivs = jacobi.eval_jacobi_deriv_batch(alpha, 0, n, x[:, None]) - if j > 0: - results = jacobi.eval_jacobi_batch(alpha, 0, n, x[:, None]) - derivs *= xhat - derivs += results * (-0.5*j) - if j > 1: - derivs *= xhat ** (j - 1) - - indices = [flat_index(i, j) for i in range(n + 1)] - dphi[indices, :] = derivs - return dphi - - -def dubiner_2d(order, xi): - sd = (order + 1) * (order + 2) // 2 - eta = eta_square(numpy.transpose(xi)) - B = [dubiner_1d(order, k, eta_k) for k, eta_k in enumerate(eta)] - D = [dubiner_deriv_1d(order, k, eta_k) for k, eta_k in enumerate(eta)] - def idx(p, q): - return (p + q) * (p + q + 1) // 2 + q - - dim = len(eta) - alphas = [(0,) * dim] - alphas.extend(tuple(row) for row in numpy.eye(dim, dtype=int)) - tabulations = {} - for alpha in alphas: - T = [Bj if aj == 0 else Dj for aj, Bj, Dj in zip(alpha, B, D)] - phi = numpy.zeros((sd, T[0].shape[1]), dtype=T[0].dtype) - for i in range(order + 1): - Ti = T[0][i] - for j in range(order + 1 - i): - scale = ((i + 0.5) * (i + j + 1.0)) ** 0.5 - phi[idx(i, j)] = T[1][flat_index(j, i)] * Ti * scale - tabulations[alpha] = phi - - chain_rule(eta, tabulations) - return tabulations - - -def dubiner_3d(order, xi): - sd = (order + 1) * (order + 2) * (order + 3) // 6 - eta = eta_cube(numpy.transpose(xi)) - B = [dubiner_1d(order, k, x) for k, x in enumerate(eta)] - D = [dubiner_deriv_1d(order, k, x) for k, x in enumerate(eta)] - def idx(p, q, r): - return (p + q + r)*(p + q + r + 1)*(p + q + r + 2)//6 + (q + r)*(q + r + 1)//2 + r - - dim = len(eta) - alphas = [(0,) * dim] - alphas.extend(tuple(row) for row in numpy.eye(dim, dtype=int)) - tabulations = {} - for alpha in alphas: - T = [Dj if aj else Bj for aj, Bj, Dj in zip(alpha, B, D)] - phi = numpy.zeros((sd, T[0].shape[1]), dtype=T[0].dtype) - for i in range(order + 1): - Ti = T[0][i] - for j in range(order + 1 - i): - Tij = T[1][flat_index(j, i)] * Ti - for k in range(order + 1 - i - j): - scale = ((i + 0.5) * (i + j + 1.0) * (i + j + k + 1.5)) ** 0.5 - phi[idx(i, j, k)] = T[2][flat_index(k, i + j)] * Tij * scale - tabulations[alpha] = phi - - chain_rule(eta, tabulations) - return tabulations - - -if __name__ == "__main__": - def symmetric_simplex(dim): - s 
= reference_element.ufc_simplex(dim) - r = lambda x: x ** 0.5 - if dim == 2: - s.vertices = [(0.0, 0.0), (-1.0, -r(3.0)), (1.0, -r(3.0))] - elif dim == 3: - s.vertices = [(r(3.0)/3, 0.0, 0.0), (-r(3.0)/6, 0.5, 0.0), - (-r(3.0)/6, -0.5, 0.0), (0.0, 0.0, r(6.0)/3)] - return s - - dim = 2 - degree = 2 - tabulate = [lambda n, x: jacobi.eval_jacobi_batch(0, 0, n, x), dubiner_2d, dubiner_3d][dim-1] - - # ref_el = symmetric_simplex(dim) - ref_el = reference_element.ufc_simplex(dim) - - if dim == 1: - base_ref_el = reference_element.DefaultInterval() - elif dim == 2: - base_ref_el = reference_element.DefaultTriangle() - elif dim == 3: - base_ref_el = reference_element.DefaultTetrahedron() - - ref_el = base_ref_el - - v1 = ref_el.get_vertices() - v2 = base_ref_el.get_vertices() - A, b = reference_element.make_affine_mapping(v1, v2) - mapping = lambda x: numpy.dot(x, A.T) + b - expansion_set = get_expansion_set(ref_el) - - if 1: - X = [tuple(map(sympy.Symbol, ("x", "y", "z")[:dim]))] - print("Affine mapping") - print(A) - print(b) - simplify = lambda x: numpy.array(sympy.simplify(x)) - Told = expansion_set.tabulate(degree, X) - Tnew = tabulate(degree, mapping(X)) - - print("New phi(X)") - print(simplify(Tnew[(0,) * dim])) - print("Old phi(X)") - print(simplify(Told)) - - for i, (alpha, Xi) in enumerate(zip(numpy.eye(dim, dtype=int), X[0])): - print("New d/dX_%d phi" % i) - print(simplify(Tnew[tuple(alpha)])) - print("Old d/dX_%d phi" % i) - Di = lambda f: [sympy.simplify(sympy.diff(f[0], Xi))] - print(numpy.array(list(map(Di, Told)))) - - else: - import FIAT - from matplotlib import pyplot as plt - - line = reference_element.ufc_simplex(1) - lr = FIAT.quadrature.GaussLobattoLegendreQuadratureLineRule - point_set = FIAT.recursive_points.RecursivePointSet(lambda n: lr(line, n+1).get_points() if n else None) - points = point_set.recursive_points(ref_el.get_vertices(), degree*5) - phi = tabulate(degree, mapping(points)) - z = phi[(0,) * dim] - y = expansion_set.tabulate(degree, points) - - x = numpy.array(points) - fig, ax = plt.subplots(subplot_kw={"projection": "3d"}) - ax.plot_trisurf(x[:, 0], x[:, 1], z[-1], linewidth=0.2, antialiased=True) - ax.plot_trisurf(x[:, 0], x[:, 1], y[-1], linewidth=0.2, antialiased=True) - plt.show() diff --git a/FIAT/polynomial_set.py b/FIAT/polynomial_set.py index 373a41f9b..03b833eb0 100644 --- a/FIAT/polynomial_set.py +++ b/FIAT/polynomial_set.py @@ -164,15 +164,10 @@ def __init__(self, ref_el, degree, shape=tuple()): dmats = [numpy.array([[0.0]], "d") for i in range(sd)] else: pts = self.point_set.recursive_points(ref_el.get_vertices(), degree) - v = numpy.transpose(expansion_set.tabulate(degree, pts)) - dv = expansion_set.tabulate_derivatives(degree, pts) - dtildes = [[[a[1][i] for a in dvrow] for dvrow in dv] - for i in range(sd)] - dmats = [numpy.linalg.solve(v, numpy.transpose(dtilde)) - for dtilde in dtildes] + for dtilde in dv] PolynomialSet.__init__(self, ref_el, degree, embedded_degree, expansion_set, coeffs, dmats) @@ -273,10 +268,7 @@ def __init__(self, ref_el, degree, size=None): # construct dmats. this is the same as ONPolynomialSet. 
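# Aside: the identity behind the dmats.  With V[p, i] = phi_i(x_p) tabulated
# at a unisolvent set of points and dV_k[p, j] = (d phi_j / d x_k)(x_p),
#     dmats[k] = numpy.linalg.solve(V, dV_k)
# gives the coefficients of each derivative in the same expansion basis,
#     d phi_j / d x_k = sum_i dmats[k][i, j] * phi_i,
# so differentiating a member of the polynomial set becomes a matrix product
# on its coefficient array.  The same identity is behind replacing the
# explicit inverse (vinv) with numpy.linalg.solve below, and behind the
# expansion_set.make_dmats method introduced in PATCH 05.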
pts = ref_el.make_points(sd, 0, degree + sd + 1) v = numpy.transpose(expansion_set.tabulate(degree, pts)) - vinv = numpy.linalg.inv(v) dv = expansion_set.tabulate_derivatives(degree, pts) - dtildes = [[[a[1][i] for a in dvrow] for dvrow in dv] - for i in range(sd)] - dmats = [numpy.dot(vinv, numpy.transpose(dtilde)) for dtilde in dtildes] + dmats = [numpy.linalg.solve(v, numpy.transpose(dtilde)) for dtilde in dv] PolynomialSet.__init__(self, ref_el, degree, embedded_degree, expansion_set, coeffs, dmats) From aa57e50dcab216ccbe901b10a19e7085d2bb8ab0 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Sat, 28 Oct 2023 12:02:24 +0100 Subject: [PATCH 05/76] ExpansionSet base class with make_dmats method --- FIAT/expansions.py | 46 +++++++++++++++++++++++++++++++++--------- FIAT/polynomial_set.py | 16 ++------------- 2 files changed, 38 insertions(+), 24 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 68ea647e1..acfb6a3db 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -14,6 +14,10 @@ from FIAT import jacobi from math import prod +from FIAT.reference_element import UFCInterval +from FIAT.quadrature import GaussLegendreQuadratureLineRule +from FIAT.recursive_points import RecursivePointSet + def flat_index(i, j): return (i + j) * (i + j + 1) // 2 + j @@ -194,7 +198,28 @@ def eta_cube(xi): return eta1, eta2, eta3 -class PointExpansionSet(object): +class ExpansionSet(object): + point_set = RecursivePointSet(lambda n: GaussLegendreQuadratureLineRule(UFCInterval(), n + 1).get_points()) + + def __init__(self, ref_el): + pass + + def _tabulate_duffy(self, degree, pts): + raise NotImplementedError + + def make_dmats(self, degree): + pts = self.point_set.recursive_points(self.ref_el.get_vertices(), degree) + tabulations = self._tabulate_duffy(degree, pts) + dmats = [] + v, = [tabulations[alpha] for alpha in tabulations if sum(alpha) == 0] + for alpha in sorted(tabulations, reverse=True): + if sum(alpha) == 1: + dv = tabulations[alpha] + dmats.append(numpy.linalg.solve(v.T, dv.T)) + return dmats + + +class PointExpansionSet(ExpansionSet): """Evaluates the point basis on a point reference element.""" def __init__(self, ref_el): @@ -222,7 +247,7 @@ def tabulate_derivatives(self, n, pts): return deriv_vals -class LineExpansionSet(object): +class LineExpansionSet(ExpansionSet): """Evaluates the Legendre basis on a line reference element.""" def __init__(self, ref_el): @@ -253,6 +278,13 @@ def tabulate(self, n, pts): else: return [] + def _tabulate_duffy(self, n, pts): + xi = numpy.transpose(numpy.dot(pts, self.A.T) + self.b) + tabulations = {(0,): dubiner_1d(n, 0, xi), + (1,): dubiner_deriv_1d(n, 0, xi)} + duffy_chain_rule(self.A, xi, tabulations) + return tabulations + def tabulate_derivatives(self, n, pts): """Returns a tuple of length one (A,) such that A[i,j] = D phi_i(pts[j]). 
The tuple is returned for @@ -281,7 +313,7 @@ def tabulate_derivatives(self, n, pts): return dv -class TriangleExpansionSet(object): +class TriangleExpansionSet(ExpansionSet): """Evaluates the orthonormal Dubiner basis on a triangular reference element.""" @@ -382,9 +414,6 @@ def idx(p, q): return tabulations def tabulate_derivatives(self, n, pts): - tabulations = self._tabulate_duffy(n, pts) - return [tabulations[tuple(key)] for key in numpy.eye(2, dtype=int)] - order = 1 data = _tabulate_dpts(self._tabulate, 2, n, order, numpy.array(pts)) # Put data in the required data structure, i.e., @@ -401,7 +430,7 @@ def tabulate_jet(self, n, pts, order=1): return _tabulate_dpts(self._tabulate, 2, n, order, numpy.array(pts)) -class TetrahedronExpansionSet(object): +class TetrahedronExpansionSet(ExpansionSet): """Collapsed orthonormal polynomial expanion on a tetrahedron.""" def __init__(self, ref_el): @@ -523,9 +552,6 @@ def idx(p, q, r): return tabulations def tabulate_derivatives(self, n, pts): - tabulations = self._tabulate_duffy(n, pts) - return [tabulations[tuple(key)] for key in numpy.eye(3, dtype=int)] - order = 1 D = 3 data = _tabulate_dpts(self._tabulate, D, n, order, numpy.array(pts)) diff --git a/FIAT/polynomial_set.py b/FIAT/polynomial_set.py index 03b833eb0..ca569a9dd 100644 --- a/FIAT/polynomial_set.py +++ b/FIAT/polynomial_set.py @@ -18,9 +18,6 @@ import numpy from FIAT import expansions from FIAT.functional import index_iterator -from FIAT.reference_element import UFCInterval -from FIAT.quadrature import GaussLegendreQuadratureLineRule -from FIAT.recursive_points import RecursivePointSet def mis(m, n): @@ -128,8 +125,6 @@ class ONPolynomialSet(PolynomialSet): for vector- and tensor-valued sets as well. """ - point_set = RecursivePointSet(lambda n: GaussLegendreQuadratureLineRule(UFCInterval(), n + 1).get_points()) - def __init__(self, ref_el, degree, shape=tuple()): if shape == tuple(): @@ -163,11 +158,7 @@ def __init__(self, ref_el, degree, shape=tuple()): if degree == 0: dmats = [numpy.array([[0.0]], "d") for i in range(sd)] else: - pts = self.point_set.recursive_points(ref_el.get_vertices(), degree) - v = numpy.transpose(expansion_set.tabulate(degree, pts)) - dv = expansion_set.tabulate_derivatives(degree, pts) - dmats = [numpy.linalg.solve(v, numpy.transpose(dtilde)) - for dtilde in dv] + dmats = expansion_set.make_dmats(degree) PolynomialSet.__init__(self, ref_el, degree, embedded_degree, expansion_set, coeffs, dmats) @@ -266,9 +257,6 @@ def __init__(self, ref_el, degree, size=None): cur_bf += 1 # construct dmats. this is the same as ONPolynomialSet. 
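# Aside: both polynomial sets now delegate dmats construction to the
# expansion set.  A minimal stand-alone sketch (the cell and degree are
# arbitrary choices, and the ExpansionSet(ref_el) dispatch used here only
# arrives in PATCH 06; before that, use get_expansion_set(ref_el)):
from FIAT import expansions, reference_element
ref_el = reference_element.ufc_simplex(2)
es = expansions.ExpansionSet(ref_el)
dmats = es.make_dmats(3)
# one N-by-N differentiation matrix per spatial direction, where
# N = expansions.polynomial_dimension(ref_el, 3) = 10
assert len(dmats) == 2 and dmats[0].shape == (10, 10)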
- pts = ref_el.make_points(sd, 0, degree + sd + 1) - v = numpy.transpose(expansion_set.tabulate(degree, pts)) - dv = expansion_set.tabulate_derivatives(degree, pts) - dmats = [numpy.linalg.solve(v, numpy.transpose(dtilde)) for dtilde in dv] + dmats = expansion_set.make_dmats(degree) PolynomialSet.__init__(self, ref_el, degree, embedded_degree, expansion_set, coeffs, dmats) From cd9adca8e99e5996214277176bcfa90c7328a12d Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Sat, 28 Oct 2023 12:44:52 +0100 Subject: [PATCH 06/76] Abstract ExpansionSet constructor --- FIAT/barycentric_interpolation.py | 20 ++++++--------- FIAT/expansions.py | 42 +++++++++++++------------------ FIAT/polynomial_set.py | 4 +-- 3 files changed, 28 insertions(+), 38 deletions(-) diff --git a/FIAT/barycentric_interpolation.py b/FIAT/barycentric_interpolation.py index 2bc33de42..a709f3986 100644 --- a/FIAT/barycentric_interpolation.py +++ b/FIAT/barycentric_interpolation.py @@ -28,7 +28,6 @@ class LagrangeLineExpansionSet(expansions.LineExpansionSet): via the second barycentric interpolation formula. See Berrut and Trefethen (2004) https://doi.org/10.1137/S0036144502417715 Eq. (4.2) & (9.4) """ - def __init__(self, ref_el, pts): self.nodes = numpy.array(pts).flatten() self.dmat, self.weights = make_dmat(self.nodes) @@ -37,6 +36,9 @@ def __init__(self, ref_el, pts): def get_num_members(self, n): return len(self.nodes) + def make_dmats(self, degree): + return [numpy.transpose(self.dmat)] + def tabulate(self, n, pts): assert n == len(self.nodes)-1 results = numpy.add.outer(-self.nodes, numpy.array(pts).flatten()) @@ -67,7 +69,10 @@ def __init__(self, ref_el, pts, shape=tuple()): num_exp_functions = expansions.polynomial_dimension(ref_el, degree) num_members = num_components * num_exp_functions embedded_degree = degree - expansion_set = get_expansion_set(ref_el, pts) + if ref_el.get_shape() == reference_element.LINE: + expansion_set = LagrangeLineExpansionSet(ref_el, pts) + else: + raise ValueError("Invalid reference element type.") # set up coefficients if shape == tuple(): @@ -84,15 +89,6 @@ def __init__(self, ref_el, pts, shape=tuple()): coeffs[cur_idx] = 1.0 cur_bf += 1 - dmats = [numpy.transpose(expansion_set.dmat)] + dmats = expansion_set.make_dmats(degree) super(LagrangePolynomialSet, self).__init__(ref_el, degree, embedded_degree, expansion_set, coeffs, dmats) - - -def get_expansion_set(ref_el, pts): - """Returns an ExpansionSet instance appopriate for the given - reference element.""" - if ref_el.get_shape() == reference_element.LINE: - return LagrangeLineExpansionSet(ref_el, pts) - else: - raise ValueError("Invalid reference element type.") diff --git a/FIAT/expansions.py b/FIAT/expansions.py index acfb6a3db..3bb977b79 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -201,8 +201,21 @@ def eta_cube(xi): class ExpansionSet(object): point_set = RecursivePointSet(lambda n: GaussLegendreQuadratureLineRule(UFCInterval(), n + 1).get_points()) - def __init__(self, ref_el): - pass + def __new__(cls, ref_el, *args, **kwargs): + """Returns an ExpansionSet instance appopriate for the given + reference element.""" + if cls is not ExpansionSet: + return super(ExpansionSet, cls).__new__(cls) + if ref_el.get_shape() == reference_element.POINT: + return PointExpansionSet(ref_el) + elif ref_el.get_shape() == reference_element.LINE: + return LineExpansionSet(ref_el) + elif ref_el.get_shape() == reference_element.TRIANGLE: + return TriangleExpansionSet(ref_el) + elif ref_el.get_shape() == reference_element.TETRAHEDRON: + 
return TetrahedronExpansionSet(ref_el) + else: + raise Exception("Unknown reference element type.") def _tabulate_duffy(self, degree, pts): raise NotImplementedError @@ -221,7 +234,6 @@ def make_dmats(self, degree): class PointExpansionSet(ExpansionSet): """Evaluates the point basis on a point reference element.""" - def __init__(self, ref_el): if ref_el.get_spatial_dimension() != 0: raise ValueError("Must have a point") @@ -249,7 +261,6 @@ def tabulate_derivatives(self, n, pts): class LineExpansionSet(ExpansionSet): """Evaluates the Legendre basis on a line reference element.""" - def __init__(self, ref_el): if ref_el.get_spatial_dimension() != 1: raise Exception("Must have a line") @@ -279,10 +290,10 @@ def tabulate(self, n, pts): return [] def _tabulate_duffy(self, n, pts): - xi = numpy.transpose(numpy.dot(pts, self.A.T) + self.b) + xi = numpy.dot(pts, self.A.T) + self.b + scale = self.A[0][0] tabulations = {(0,): dubiner_1d(n, 0, xi), - (1,): dubiner_deriv_1d(n, 0, xi)} - duffy_chain_rule(self.A, xi, tabulations) + (1,): dubiner_deriv_1d(n, 0, xi) * scale} return tabulations def tabulate_derivatives(self, n, pts): @@ -316,7 +327,6 @@ def tabulate_derivatives(self, n, pts): class TriangleExpansionSet(ExpansionSet): """Evaluates the orthonormal Dubiner basis on a triangular reference element.""" - def __init__(self, ref_el): if ref_el.get_spatial_dimension() != 2: raise Exception("Must have a triangle") @@ -432,7 +442,6 @@ def tabulate_jet(self, n, pts, order=1): class TetrahedronExpansionSet(ExpansionSet): """Collapsed orthonormal polynomial expanion on a tetrahedron.""" - def __init__(self, ref_el): if ref_el.get_spatial_dimension() != 3: raise Exception("Must be a tetrahedron") @@ -569,21 +578,6 @@ def tabulate_jet(self, n, pts, order=1): return _tabulate_dpts(self._tabulate, 3, n, order, numpy.array(pts)) -def get_expansion_set(ref_el): - """Returns an ExpansionSet instance appopriate for the given - reference element.""" - if ref_el.get_shape() == reference_element.POINT: - return PointExpansionSet(ref_el) - elif ref_el.get_shape() == reference_element.LINE: - return LineExpansionSet(ref_el) - elif ref_el.get_shape() == reference_element.TRIANGLE: - return TriangleExpansionSet(ref_el) - elif ref_el.get_shape() == reference_element.TETRAHEDRON: - return TetrahedronExpansionSet(ref_el) - else: - raise Exception("Unknown reference element type.") - - def polynomial_dimension(ref_el, degree): """Returns the dimension of the space of polynomials of degree no greater than degree on the reference element.""" diff --git a/FIAT/polynomial_set.py b/FIAT/polynomial_set.py index ca569a9dd..85bd6feed 100644 --- a/FIAT/polynomial_set.py +++ b/FIAT/polynomial_set.py @@ -135,7 +135,7 @@ def __init__(self, ref_el, degree, shape=tuple()): num_exp_functions = expansions.polynomial_dimension(ref_el, degree) num_members = num_components * num_exp_functions embedded_degree = degree - expansion_set = expansions.get_expansion_set(ref_el) + expansion_set = expansions.ExpansionSet(ref_el) sd = ref_el.get_spatial_dimension() # set up coefficients @@ -235,7 +235,7 @@ def __init__(self, ref_el, degree, size=None): num_components = size * (size + 1) // 2 num_members = num_components * num_exp_functions embedded_degree = degree - expansion_set = expansions.get_expansion_set(ref_el) + expansion_set = expansions.ExpansionSet(ref_el) # set up coefficients for symmetric tensors coeffs_shape = tuple([num_members] + list(shape) + [num_exp_functions]) From 46cfb34a78f61558c5becacae46c350d37b96840 Mon Sep 17 00:00:00 
2001 From: Pablo Brubeck Date: Sat, 28 Oct 2023 12:51:14 +0100 Subject: [PATCH 07/76] fix most tests --- FIAT/expansions.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 3bb977b79..2ad84fe55 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -294,6 +294,10 @@ def _tabulate_duffy(self, n, pts): scale = self.A[0][0] tabulations = {(0,): dubiner_1d(n, 0, xi), (1,): dubiner_deriv_1d(n, 0, xi) * scale} + for alpha in tabulations: + results = tabulations[alpha] + for k in range(n+1): + results[k, :] *= (k + 0.5)**0.5 return tabulations def tabulate_derivatives(self, n, pts): From 1ff2114ff1c34cf0b65e8929417b071665c398d5 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Sat, 28 Oct 2023 12:52:27 +0100 Subject: [PATCH 08/76] remove get_expansion_set from regression tests --- test/regression/test_regression.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/regression/test_regression.py b/test/regression/test_regression.py index 4d5151195..3e6bf77f3 100644 --- a/test/regression/test_regression.py +++ b/test/regression/test_regression.py @@ -119,7 +119,7 @@ def create_data(): E = reference_element.DefaultTriangle() k = 3 pts = reference_element.make_lattice(E.get_vertices(), k) - Phis = expansions.get_expansion_set(E) + Phis = expansions.ExpansionSet(E) phis = Phis.tabulate(k, pts) dphis = Phis.tabulate_derivatives(k, pts) return phis, dphis From b3a488d4b5177fe02030606836e9af87305a8e4b Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Sat, 28 Oct 2023 15:04:26 +0100 Subject: [PATCH 09/76] remove math.prod, test up to degree 7 in 2D/3D --- FIAT/expansions.py | 15 +++++++++------ test/unit/test_gauss_legendre.py | 3 ++- test/unit/test_gauss_lobatto_legendre.py | 3 ++- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 2ad84fe55..c558c3b10 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -12,7 +12,6 @@ import sympy from FIAT import reference_element from FIAT import jacobi -from math import prod from FIAT.reference_element import UFCInterval from FIAT.quadrature import GaussLegendreQuadratureLineRule @@ -56,7 +55,6 @@ def dubiner_deriv_1d(order, dim, x): derivs += results * (-0.5*j) if j > 1: derivs *= xhat ** (j - 1) - indices = [flat_index(i, j) for i in range(n + 1)] dphi[indices, :] = derivs return dphi @@ -65,11 +63,16 @@ def dubiner_deriv_1d(order, dim, x): def duffy_chain_rule(A, eta, tabulations): dphi_dxi = [tabulations[alpha] for alpha in sorted(tabulations, reverse=True) if sum(alpha) == 1] dim = len(eta) + eta1 = [(1. - x) * 0.5 for x in eta] for i in range(dim): for j in range(i): - dphi_dxi[i] += dphi_dxi[j] * (1. + eta[j])*0.5 * prod((1. - eta[k])*0.5 for k in range(j+1, dim) if k != i) - dphi_dxi[i] /= prod((1. - x)*0.5 for x in eta[i+1:]) - + Jij = -0.5 * (1. 
+ eta[j]) + for k in range(j + 1, dim): + if k != i: + Jij *= eta1[k] + dphi_dxi[i] -= dphi_dxi[j] * Jij + for j in range(i + 1, dim): + dphi_dxi[i] /= eta1[j] k = 0 dphi_dx = [sum(dphi_dxi[j] * A[j][i] for j in range(dim)) for i in range(dim)] for alpha in sorted(tabulations, reverse=True): @@ -297,7 +300,7 @@ def _tabulate_duffy(self, n, pts): for alpha in tabulations: results = tabulations[alpha] for k in range(n+1): - results[k, :] *= (k + 0.5)**0.5 + results[k] *= (k + 0.5)**0.5 return tabulations def tabulate_derivatives(self, n, pts): diff --git a/test/unit/test_gauss_legendre.py b/test/unit/test_gauss_legendre.py index 1037581ef..5b397396e 100644 --- a/test/unit/test_gauss_legendre.py +++ b/test/unit/test_gauss_legendre.py @@ -35,7 +35,8 @@ def symmetric_simplex(dim): return s -@pytest.mark.parametrize("dim, degree", sum(([(d, p) for p in range(0, 8-d)] for d in range(1, 4)), [])) +@pytest.mark.parametrize("degree", range(0, 8)) +@pytest.mark.parametrize("dim", (1, 2, 3)) def test_gl_basis_values(dim, degree): """Ensure that integrating a simple monomial produces the expected results.""" from FIAT import GaussLegendre, make_quadrature diff --git a/test/unit/test_gauss_lobatto_legendre.py b/test/unit/test_gauss_lobatto_legendre.py index 3cd5b9a2a..427d92b28 100644 --- a/test/unit/test_gauss_lobatto_legendre.py +++ b/test/unit/test_gauss_lobatto_legendre.py @@ -35,7 +35,8 @@ def symmetric_simplex(dim): return s -@pytest.mark.parametrize("dim, degree", sum(([(d, p) for p in range(1, 8-d)] for d in range(1, 4)), [])) +@pytest.mark.parametrize("degree", range(1, 8)) +@pytest.mark.parametrize("dim", (1, 2, 3)) def test_gll_basis_values(dim, degree): """Ensure that integrating a simple monomial produces the expected results.""" from FIAT import GaussLobattoLegendre, make_quadrature From 48b932ab4df763da46eadc02ec5666f91dc0efb7 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Sat, 28 Oct 2023 16:10:24 +0100 Subject: [PATCH 10/76] style --- FIAT/expansions.py | 47 +++++++++++++++++++++++----------------------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index c558c3b10..9aef79618 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -24,7 +24,9 @@ def flat_index(i, j): def dubiner_1d(order, dim, x): if dim == 0: - return jacobi.eval_jacobi_batch(0, 0, order, x[:, None]) + scale = numpy.sqrt(0.5 + numpy.arange(order + 1)) + results = jacobi.eval_jacobi_batch(0, 0, order, x[:, None]) + return numpy.multiply(scale[:, None], results, out=results) sd = (order + 1) * (order + 2) // 2 phi = numpy.zeros((sd, x.size), dtype=x.dtype) xhat = (1. - x) * 0.5 @@ -34,14 +36,18 @@ def dubiner_1d(order, dim, x): results = jacobi.eval_jacobi_batch(alpha, 0, n, x[:, None]) if j > 0: results *= xhat ** j - indices = [flat_index(i, j) for i in range(n + 1)] + scale = numpy.sqrt(0.5*(alpha + 1) + numpy.arange(n + 1)) + numpy.multiply(scale[:, None], results, out=results) + indices = [flat_index(j, i) for i in range(n + 1)] phi[indices, :] = results return phi def dubiner_deriv_1d(order, dim, x): if dim == 0: - return jacobi.eval_jacobi_deriv_batch(0, 0, order, x[:, None]) + scale = numpy.sqrt(0.5 + numpy.arange(order + 1)) + results = jacobi.eval_jacobi_deriv_batch(0, 0, order, x[:, None]) + return numpy.multiply(scale[:, None], results, out=results) sd = (order + 1) * (order + 2) // 2 dphi = numpy.zeros((sd, x.size), dtype=x.dtype) xhat = (1. 
- x) * 0.5 @@ -55,7 +61,9 @@ def dubiner_deriv_1d(order, dim, x): derivs += results * (-0.5*j) if j > 1: derivs *= xhat ** (j - 1) - indices = [flat_index(i, j) for i in range(n + 1)] + scale = numpy.sqrt(0.5*(alpha + 1) + numpy.arange(n + 1)) + numpy.multiply(scale[:, None], derivs, out=derivs) + indices = [flat_index(j, i) for i in range(n + 1)] dphi[indices, :] = derivs return dphi @@ -73,12 +81,11 @@ def duffy_chain_rule(A, eta, tabulations): dphi_dxi[i] -= dphi_dxi[j] * Jij for j in range(i + 1, dim): dphi_dxi[i] /= eta1[j] - k = 0 - dphi_dx = [sum(dphi_dxi[j] * A[j][i] for j in range(dim)) for i in range(dim)] + j = 0 for alpha in sorted(tabulations, reverse=True): if sum(alpha) == 1: - tabulations[alpha] = dphi_dx[k] - k += 1 + tabulations[alpha] = sum(dphi_dxi[i] * A[i][j] for i in range(dim)) + j += 1 def jrc(a, b, n): @@ -297,10 +304,6 @@ def _tabulate_duffy(self, n, pts): scale = self.A[0][0] tabulations = {(0,): dubiner_1d(n, 0, xi), (1,): dubiner_deriv_1d(n, 0, xi) * scale} - for alpha in tabulations: - results = tabulations[alpha] - for k in range(n+1): - results[k] *= (k + 0.5)**0.5 return tabulations def tabulate_derivatives(self, n, pts): @@ -412,20 +415,19 @@ def idx(p, q): sd = self.get_num_members(n) xi = numpy.transpose(numpy.dot(pts, self.A.T) + self.b) eta = eta_square(xi) - B = [dubiner_1d(n, k, eta_k) for k, eta_k in enumerate(eta)] - D = [dubiner_deriv_1d(n, k, eta_k) for k, eta_k in enumerate(eta)] + basis = [dubiner_1d(n, k, eta_k) for k, eta_k in enumerate(eta)] + derivs = [dubiner_deriv_1d(n, k, eta_k) for k, eta_k in enumerate(eta)] dim = len(eta) alphas = [(0,) * dim] alphas.extend(tuple(row) for row in numpy.eye(dim, dtype=int)) tabulations = {} for alpha in alphas: - T = [Bj if aj == 0 else Dj for aj, Bj, Dj in zip(alpha, B, D)] + T = [v if a == 0 else dv for a, v, dv in zip(alpha, basis, derivs)] phi = numpy.zeros((sd, T[0].shape[1]), dtype=T[0].dtype) for i in range(n + 1): Ti = T[0][i] for j in range(n + 1 - i): - scale = ((i + 0.5) * (i + j + 1.0)) ** 0.5 - phi[idx(i, j)] = T[1][flat_index(j, i)] * Ti * scale + phi[idx(i, j)] = T[1][flat_index(i, j)] * Ti tabulations[alpha] = phi duffy_chain_rule(self.A, eta, tabulations) return tabulations @@ -547,22 +549,21 @@ def idx(p, q, r): sd = self.get_num_members(n) xi = numpy.transpose(numpy.dot(pts, self.A.T) + self.b) eta = eta_cube(xi) - B = [dubiner_1d(n, k, x) for k, x in enumerate(eta)] - D = [dubiner_deriv_1d(n, k, x) for k, x in enumerate(eta)] + basis = [dubiner_1d(n, k, x) for k, x in enumerate(eta)] + derivs = [dubiner_deriv_1d(n, k, x) for k, x in enumerate(eta)] dim = len(eta) alphas = [(0,) * dim] alphas.extend(tuple(row) for row in numpy.eye(dim, dtype=int)) tabulations = {} for alpha in alphas: - T = [Dj if aj else Bj for aj, Bj, Dj in zip(alpha, B, D)] + T = [v if a == 0 else dv for a, v, dv in zip(alpha, basis, derivs)] phi = numpy.zeros((sd, T[0].shape[1]), dtype=T[0].dtype) for i in range(n + 1): Ti = T[0][i] for j in range(n + 1 - i): - Tij = T[1][flat_index(j, i)] * Ti + Tij = T[1][flat_index(i, j)] * Ti for k in range(n + 1 - i - j): - scale = ((i + 0.5) * (i + j + 1.0) * (i + j + k + 1.5)) ** 0.5 - phi[idx(i, j, k)] = T[2][flat_index(k, i + j)] * Tij * scale + phi[idx(i, j, k)] = T[2][flat_index(i + j, k)] * Tij tabulations[alpha] = phi duffy_chain_rule(self.A, eta, tabulations) return tabulations From 11e3468dedc9840e0630d7aca882f70c00425e97 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Sat, 28 Oct 2023 17:17:46 +0100 Subject: [PATCH 11/76] style --- FIAT/expansions.py | 50 
+++++++++++++++++++++++----------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 9aef79618..ef2016e0a 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -12,7 +12,6 @@ import sympy from FIAT import reference_element from FIAT import jacobi - from FIAT.reference_element import UFCInterval from FIAT.quadrature import GaussLegendreQuadratureLineRule from FIAT.recursive_points import RecursivePointSet @@ -29,16 +28,16 @@ def dubiner_1d(order, dim, x): return numpy.multiply(scale[:, None], results, out=results) sd = (order + 1) * (order + 2) // 2 phi = numpy.zeros((sd, x.size), dtype=x.dtype) - xhat = (1. - x) * 0.5 - for j in range(order+1): - n = order - j - alpha = 2 * j + dim + x1 = (1. - x) * 0.5 + for i in range(order + 1): + n = order - i + alpha = 2 * i + dim results = jacobi.eval_jacobi_batch(alpha, 0, n, x[:, None]) - if j > 0: - results *= xhat ** j + if i > 0: + results *= x1 ** i scale = numpy.sqrt(0.5*(alpha + 1) + numpy.arange(n + 1)) numpy.multiply(scale[:, None], results, out=results) - indices = [flat_index(j, i) for i in range(n + 1)] + indices = [flat_index(i, j) for j in range(n + 1)] phi[indices, :] = results return phi @@ -50,27 +49,29 @@ def dubiner_deriv_1d(order, dim, x): return numpy.multiply(scale[:, None], results, out=results) sd = (order + 1) * (order + 2) // 2 dphi = numpy.zeros((sd, x.size), dtype=x.dtype) - xhat = (1. - x) * 0.5 - for j in range(order+1): - n = order - j - alpha = 2 * j + dim + x1 = (1. - x) * 0.5 + for i in range(order + 1): + n = order - i + alpha = 2 * i + dim derivs = jacobi.eval_jacobi_deriv_batch(alpha, 0, n, x[:, None]) - if j > 0: + if i > 0: results = jacobi.eval_jacobi_batch(alpha, 0, n, x[:, None]) - derivs *= xhat - derivs += results * (-0.5*j) - if j > 1: - derivs *= xhat ** (j - 1) + derivs *= x1 + derivs += results * (-0.5 * i) + if i > 1: + derivs *= x1 ** (i - 1) scale = numpy.sqrt(0.5*(alpha + 1) + numpy.arange(n + 1)) numpy.multiply(scale[:, None], derivs, out=derivs) - indices = [flat_index(j, i) for i in range(n + 1)] + indices = [flat_index(i, j) for j in range(n + 1)] dphi[indices, :] = derivs return dphi def duffy_chain_rule(A, eta, tabulations): - dphi_dxi = [tabulations[alpha] for alpha in sorted(tabulations, reverse=True) if sum(alpha) == 1] dim = len(eta) + dphi_dxi = [tabulations[alpha] for alpha in sorted(tabulations, reverse=True) if sum(alpha) == 1] + if len(dphi_dxi) < dim: + return eta1 = [(1. 
- x) * 0.5 for x in eta] for i in range(dim): for j in range(i): @@ -301,9 +302,8 @@ def tabulate(self, n, pts): def _tabulate_duffy(self, n, pts): xi = numpy.dot(pts, self.A.T) + self.b - scale = self.A[0][0] tabulations = {(0,): dubiner_1d(n, 0, xi), - (1,): dubiner_deriv_1d(n, 0, xi) * scale} + (1,): dubiner_deriv_1d(n, 0, xi) * self.A[0][0]} return tabulations def tabulate_derivatives(self, n, pts): @@ -415,9 +415,9 @@ def idx(p, q): sd = self.get_num_members(n) xi = numpy.transpose(numpy.dot(pts, self.A.T) + self.b) eta = eta_square(xi) - basis = [dubiner_1d(n, k, eta_k) for k, eta_k in enumerate(eta)] - derivs = [dubiner_deriv_1d(n, k, eta_k) for k, eta_k in enumerate(eta)] dim = len(eta) + basis = [dubiner_1d(n, k, eta[k]) for k in range(dim)] + derivs = [dubiner_deriv_1d(n, k, eta[k]) for k in range(dim)] alphas = [(0,) * dim] alphas.extend(tuple(row) for row in numpy.eye(dim, dtype=int)) tabulations = {} @@ -549,9 +549,9 @@ def idx(p, q, r): sd = self.get_num_members(n) xi = numpy.transpose(numpy.dot(pts, self.A.T) + self.b) eta = eta_cube(xi) - basis = [dubiner_1d(n, k, x) for k, x in enumerate(eta)] - derivs = [dubiner_deriv_1d(n, k, x) for k, x in enumerate(eta)] dim = len(eta) + basis = [dubiner_1d(n, k, eta[k]) for k in range(dim)] + derivs = [dubiner_deriv_1d(n, k, eta[k]) for k in range(dim)] alphas = [(0,) * dim] alphas.extend(tuple(row) for row in numpy.eye(dim, dtype=int)) tabulations = {} From 28d35bc37f7483b8ae0d54d3d105182ebf0ca0c4 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Sun, 29 Oct 2023 10:44:55 +0000 Subject: [PATCH 12/76] Lazy dmats --- FIAT/barycentric_interpolation.py | 3 +- FIAT/brezzi_douglas_fortin_marini.py | 3 +- FIAT/expansions.py | 73 +++++++++++++++------------- FIAT/finite_element.py | 7 +-- FIAT/nedelec.py | 6 +-- FIAT/polynomial_set.py | 30 +++++------- FIAT/raviart_thomas.py | 3 +- 7 files changed, 59 insertions(+), 66 deletions(-) diff --git a/FIAT/barycentric_interpolation.py b/FIAT/barycentric_interpolation.py index a709f3986..1a80bd345 100644 --- a/FIAT/barycentric_interpolation.py +++ b/FIAT/barycentric_interpolation.py @@ -89,6 +89,5 @@ def __init__(self, ref_el, pts, shape=tuple()): coeffs[cur_idx] = 1.0 cur_bf += 1 - dmats = expansion_set.make_dmats(degree) super(LagrangePolynomialSet, self).__init__(ref_el, degree, embedded_degree, - expansion_set, coeffs, dmats) + expansion_set, coeffs) diff --git a/FIAT/brezzi_douglas_fortin_marini.py b/FIAT/brezzi_douglas_fortin_marini.py index 380dc2726..fb8f81bd8 100644 --- a/FIAT/brezzi_douglas_fortin_marini.py +++ b/FIAT/brezzi_douglas_fortin_marini.py @@ -95,8 +95,7 @@ def BDFMSpace(ref_el, order): order, order, vec_poly_set.get_expansion_set(), - new_coeffs, - vec_poly_set.get_dmats()) + new_coeffs) element_set = polynomial_set.polynomial_set_union_normalized(bubble_set, vec_poly_set) return element_set diff --git a/FIAT/expansions.py b/FIAT/expansions.py index ef2016e0a..43ffad478 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -17,10 +17,14 @@ from FIAT.recursive_points import RecursivePointSet -def flat_index(i, j): +def morton_index2(i, j): return (i + j) * (i + j + 1) // 2 + j +def morton_index3(p, q, r): + return (p + q + r)*(p + q + r + 1)*(p + q + r + 2)//6 + (q + r)*(q + r + 1)//2 + r + + def dubiner_1d(order, dim, x): if dim == 0: scale = numpy.sqrt(0.5 + numpy.arange(order + 1)) @@ -37,7 +41,7 @@ def dubiner_1d(order, dim, x): results *= x1 ** i scale = numpy.sqrt(0.5*(alpha + 1) + numpy.arange(n + 1)) numpy.multiply(scale[:, None], results, out=results) - indices = 
[flat_index(i, j) for j in range(n + 1)] + indices = [morton_index2(i, j) for j in range(n + 1)] phi[indices, :] = results return phi @@ -62,7 +66,7 @@ def dubiner_deriv_1d(order, dim, x): derivs *= x1 ** (i - 1) scale = numpy.sqrt(0.5*(alpha + 1) + numpy.arange(n + 1)) numpy.multiply(scale[:, None], derivs, out=derivs) - indices = [flat_index(i, j) for j in range(n + 1)] + indices = [morton_index2(i, j) for j in range(n + 1)] dphi[indices, :] = derivs return dphi @@ -232,15 +236,22 @@ def _tabulate_duffy(self, degree, pts): raise NotImplementedError def make_dmats(self, degree): + if not hasattr(self, "_dmats_cache"): + self._dmats_cache = {} + cache = self._dmats_cache + key = degree + try: + return cache[key] + except KeyError: + pass + if degree == 0: + return cache.setdefault(key, numpy.zeros((self.ref_el.get_spatial_dimension(), 1, 1), "d")) pts = self.point_set.recursive_points(self.ref_el.get_vertices(), degree) - tabulations = self._tabulate_duffy(degree, pts) - dmats = [] - v, = [tabulations[alpha] for alpha in tabulations if sum(alpha) == 0] - for alpha in sorted(tabulations, reverse=True): - if sum(alpha) == 1: - dv = tabulations[alpha] - dmats.append(numpy.linalg.solve(v.T, dv.T)) - return dmats + tab = self._tabulate_duffy(degree, pts) + v, = [tab[alpha].T for alpha in tab if sum(alpha) == 0] + dv = numpy.stack([tab[alpha].T for alpha in sorted(tab, reverse=True) if sum(alpha) == 1]) + dmats = numpy.linalg.solve(v, dv) + return cache.setdefault(key, dmats) class PointExpansionSet(ExpansionSet): @@ -364,9 +375,7 @@ def _tabulate(self, n, pts): ref_pts = [sum(self.A[i][j] * pts[j] for j in range(m2)) + self.b[i] for i in range(m1)] - def idx(p, q): - return (p + q) * (p + q + 1) // 2 + q - + idx = morton_index2 results = ((n + 1) * (n + 2) // 2) * [None] results[0] = 1.0 \ @@ -410,24 +419,23 @@ def idx(p, q): # return self.scale * results def _tabulate_duffy(self, n, pts): - def idx(p, q): - return (p + q) * (p + q + 1) // 2 + q + from FIAT.polynomial_set import mis + idx = morton_index2 sd = self.get_num_members(n) xi = numpy.transpose(numpy.dot(pts, self.A.T) + self.b) eta = eta_square(xi) dim = len(eta) basis = [dubiner_1d(n, k, eta[k]) for k in range(dim)] derivs = [dubiner_deriv_1d(n, k, eta[k]) for k in range(dim)] - alphas = [(0,) * dim] - alphas.extend(tuple(row) for row in numpy.eye(dim, dtype=int)) + alphas = mis(dim, 0) + mis(dim, 1) tabulations = {} for alpha in alphas: - T = [v if a == 0 else dv for a, v, dv in zip(alpha, basis, derivs)] - phi = numpy.zeros((sd, T[0].shape[1]), dtype=T[0].dtype) + V = [v if a == 0 else dv for a, v, dv in zip(alpha, basis, derivs)] + phi = numpy.zeros((sd, V[0].shape[1]), dtype=V[0].dtype) for i in range(n + 1): - Ti = T[0][i] + Vi = V[0][i] for j in range(n + 1 - i): - phi[idx(i, j)] = T[1][flat_index(i, j)] * Ti + phi[idx(i, j)] = V[1][morton_index2(i, j)] * Vi tabulations[alpha] = phi duffy_chain_rule(self.A, eta, tabulations) return tabulations @@ -478,9 +486,7 @@ def _tabulate(self, n, pts): ref_pts = [sum(self.A[i][j] * pts[j] for j in range(m2)) + self.b[i] for i in range(m1)] - def idx(p, q, r): - return (p + q + r)*(p + q + r + 1)*(p + q + r + 2)//6 + (q + r)*(q + r + 1)//2 + r - + idx = morton_index3 results = ((n + 1) * (n + 2) * (n + 3) // 6) * [None] results[0] = 1.0 \ + pts[0] - pts[0] \ @@ -544,26 +550,25 @@ def idx(p, q, r): return results def _tabulate_duffy(self, n, pts): - def idx(p, q, r): - return (p + q + r)*(p + q + r + 1)*(p + q + r + 2)//6 + (q + r)*(q + r + 1)//2 + r + from FIAT.polynomial_set import mis + 
idx = morton_index3 sd = self.get_num_members(n) xi = numpy.transpose(numpy.dot(pts, self.A.T) + self.b) eta = eta_cube(xi) dim = len(eta) basis = [dubiner_1d(n, k, eta[k]) for k in range(dim)] derivs = [dubiner_deriv_1d(n, k, eta[k]) for k in range(dim)] - alphas = [(0,) * dim] - alphas.extend(tuple(row) for row in numpy.eye(dim, dtype=int)) + alphas = mis(dim, 0) + mis(dim, 1) tabulations = {} for alpha in alphas: - T = [v if a == 0 else dv for a, v, dv in zip(alpha, basis, derivs)] - phi = numpy.zeros((sd, T[0].shape[1]), dtype=T[0].dtype) + V = [v if a == 0 else dv for a, v, dv in zip(alpha, basis, derivs)] + phi = numpy.zeros((sd, V[0].shape[1]), dtype=V[0].dtype) for i in range(n + 1): - Ti = T[0][i] + Vi = V[0][i] for j in range(n + 1 - i): - Tij = T[1][flat_index(i, j)] * Ti + Vij = V[1][morton_index2(i, j)] * Vi for k in range(n + 1 - i - j): - phi[idx(i, j, k)] = T[2][flat_index(i + j, k)] * Tij + phi[idx(i, j, k)] = V[2][morton_index2(i + j, k)] * Vij tabulations[alpha] = phi duffy_chain_rule(self.A, eta, tabulations) return tabulations diff --git a/FIAT/finite_element.py b/FIAT/finite_element.py index 711359126..44785926d 100644 --- a/FIAT/finite_element.py +++ b/FIAT/finite_element.py @@ -136,9 +136,7 @@ def __init__(self, poly_set, dual, order, formdegree=None, mapping="affine", ref V = numpy.dot(A, numpy.transpose(B)) self.V = V - Vinv = numpy.linalg.inv(V) - - new_coeffs_flat = numpy.dot(numpy.transpose(Vinv), B) + new_coeffs_flat = numpy.linalg.solve(numpy.transpose(V), B) new_shp = tuple([new_coeffs_flat.shape[0]] + list(shp[1:])) new_coeffs = numpy.reshape(new_coeffs_flat, new_shp) @@ -147,8 +145,7 @@ def __init__(self, poly_set, dual, order, formdegree=None, mapping="affine", ref poly_set.get_degree(), poly_set.get_embedded_degree(), poly_set.get_expansion_set(), - new_coeffs, - poly_set.get_dmats()) + new_coeffs) def degree(self): "Return the degree of the (embedding) polynomial space." diff --git a/FIAT/nedelec.py b/FIAT/nedelec.py index 862b6ee2d..e419a1eb7 100644 --- a/FIAT/nedelec.py +++ b/FIAT/nedelec.py @@ -68,8 +68,7 @@ def rot_x_foo(a): k + 1, k + 1, vec_Pkp1.get_expansion_set(), - PkH_crossx_coeffs, - vec_Pkp1.get_dmats()) + PkH_crossx_coeffs) return polynomial_set.polynomial_set_union_normalized(vec_Pk_from_Pkp1, PkHcrossx) @@ -136,8 +135,7 @@ def NedelecSpace3D(ref_el, k): k + 1, k + 1, vec_Pkp1.get_expansion_set(), - PkCrossXcoeffs, - vec_Pkp1.get_dmats()) + PkCrossXcoeffs) return polynomial_set.polynomial_set_union_normalized(vec_Pk, PkCrossX) diff --git a/FIAT/polynomial_set.py b/FIAT/polynomial_set.py index 85bd6feed..d1ecf00c7 100644 --- a/FIAT/polynomial_set.py +++ b/FIAT/polynomial_set.py @@ -54,15 +54,14 @@ class PolynomialSet(object): function. 
""" - def __init__(self, ref_el, degree, embedded_degree, expansion_set, coeffs, - dmats): + def __init__(self, ref_el, degree, embedded_degree, expansion_set, coeffs): self.ref_el = ref_el self.num_members = coeffs.shape[0] self.degree = degree self.embedded_degree = embedded_degree self.expansion_set = expansion_set self.coeffs = coeffs - self.dmats = dmats + self.dmats = [] def tabulate_new(self, pts): return numpy.dot(self.coeffs, @@ -72,11 +71,14 @@ def tabulate(self, pts, jet_order=0): """Returns the values of the polynomial set.""" result = {} base_vals = self.expansion_set.tabulate(self.embedded_degree, pts) + dmats = self.get_dmats() if jet_order > 0 else self.dmats for i in range(jet_order + 1): alphas = mis(self.ref_el.get_spatial_dimension(), i) for alpha in alphas: - if len(self.dmats) > 0: - D = form_matrix_product(self.dmats, alpha) + if sum(alpha) == 0: + D = numpy.eye(len(base_vals)) + elif len(dmats) > 0: + D = form_matrix_product(dmats, alpha) else: # special for vertex without defined point location assert pts == [()] @@ -102,6 +104,8 @@ def get_embedded_degree(self): return self.embedded_degree def get_dmats(self): + if len(self.dmats) == 0: + self.dmats = self.expansion_set.make_dmats(self.degree) return self.dmats def get_reference_element(self): @@ -116,7 +120,7 @@ def take(self, items): """Extracts subset of polynomials given by items.""" new_coeffs = numpy.take(self.get_coeffs(), items, 0) return PolynomialSet(self.ref_el, self.degree, self.embedded_degree, - self.expansion_set, new_coeffs, self.dmats) + self.expansion_set, new_coeffs) class ONPolynomialSet(PolynomialSet): @@ -136,7 +140,6 @@ def __init__(self, ref_el, degree, shape=tuple()): num_members = num_components * num_exp_functions embedded_degree = degree expansion_set = expansions.ExpansionSet(ref_el) - sd = ref_el.get_spatial_dimension() # set up coefficients coeffs_shape = tuple([num_members] + list(shape) + [num_exp_functions]) @@ -154,13 +157,8 @@ def __init__(self, ref_el, degree, shape=tuple()): cur_idx = tuple([cur_bf] + list(idx) + [exp_bf]) coeffs[cur_idx] = 1.0 cur_bf += 1 - # construct dmats - if degree == 0: - dmats = [numpy.array([[0.0]], "d") for i in range(sd)] - else: - dmats = expansion_set.make_dmats(degree) PolynomialSet.__init__(self, ref_el, degree, embedded_degree, - expansion_set, coeffs, dmats) + expansion_set, coeffs) def project(f, U, Q): @@ -214,8 +212,7 @@ def polynomial_set_union_normalized(A, B): A.get_degree(), A.get_embedded_degree(), A.get_expansion_set(), - coeffs, - A.get_dmats()) + coeffs) class ONSymTensorPolynomialSet(PolynomialSet): @@ -257,6 +254,5 @@ def __init__(self, ref_el, degree, size=None): cur_bf += 1 # construct dmats. this is the same as ONPolynomialSet. 
- dmats = expansion_set.make_dmats(degree) PolynomialSet.__init__(self, ref_el, degree, embedded_degree, - expansion_set, coeffs, dmats) + expansion_set, coeffs) diff --git a/FIAT/raviart_thomas.py b/FIAT/raviart_thomas.py index 69907d046..d8233cbe2 100644 --- a/FIAT/raviart_thomas.py +++ b/FIAT/raviart_thomas.py @@ -55,8 +55,7 @@ def RTSpace(ref_el, deg): deg, deg + 1, vec_Pkp1.get_expansion_set(), - PkHx_coeffs, - vec_Pkp1.get_dmats()) + PkHx_coeffs) return polynomial_set.polynomial_set_union_normalized(vec_Pk_from_Pkp1, PkHx) From cc48c59932949de10b943adc3a3d9d7264639fc4 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Sun, 29 Oct 2023 14:45:35 +0000 Subject: [PATCH 13/76] Unlazy dmats for NodalEnrichedElement --- FIAT/polynomial_set.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/FIAT/polynomial_set.py b/FIAT/polynomial_set.py index d1ecf00c7..4242b6266 100644 --- a/FIAT/polynomial_set.py +++ b/FIAT/polynomial_set.py @@ -54,14 +54,14 @@ class PolynomialSet(object): function. """ - def __init__(self, ref_el, degree, embedded_degree, expansion_set, coeffs): + def __init__(self, ref_el, degree, embedded_degree, expansion_set, coeffs, dmats=None): self.ref_el = ref_el self.num_members = coeffs.shape[0] self.degree = degree self.embedded_degree = embedded_degree self.expansion_set = expansion_set self.coeffs = coeffs - self.dmats = [] + self.dmats = dmats or [] def tabulate_new(self, pts): return numpy.dot(self.coeffs, From f48ea4fe655507497774fd0637bd918b5be80bb7 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Sun, 29 Oct 2023 14:49:36 +0000 Subject: [PATCH 14/76] NodalEnrichedElement only uses a single expansion set, the dmat can therefore be lazy --- FIAT/nodal_enriched.py | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/FIAT/nodal_enriched.py b/FIAT/nodal_enriched.py index eebb2eb51..cd50a2d6b 100644 --- a/FIAT/nodal_enriched.py +++ b/FIAT/nodal_enriched.py @@ -57,13 +57,11 @@ def __init__(self, *elements): # Merge polynomial sets coeffs = _merge_coeffs([e.get_coeffs() for e in elements]) - dmats = _merge_dmats([e.dmats() for e in elements]) poly_set = PolynomialSet(ref_el, degree, embedded_degree, expansion_set, - coeffs, - dmats) + coeffs) # Renumber dof numbers offsets = np.cumsum([0] + [e.space_dimension() for e in elements[:-1]]) @@ -104,19 +102,6 @@ def _merge_coeffs(coeffss): return new_coeffs -def _merge_dmats(dmatss): - shape, arg = max((dmats[0].shape, args) for args, dmats in enumerate(dmatss)) - assert len(shape) == 2 and shape[0] == shape[1] - new_dmats = [] - for dim in range(len(dmatss[arg])): - new_dmats.append(dmatss[arg][dim].copy()) - for dmats in dmatss: - sl = slice(0, dmats[dim].shape[0]), slice(0, dmats[dim].shape[1]) - assert np.allclose(dmats[dim], new_dmats[dim][sl]), \ - "dmats of elements to be directly summed are not matching!" 
- return new_dmats - - def _merge_entity_ids(entity_ids, offsets): ret = {} for i, ids in enumerate(entity_ids): From daaab1e8da662147e603da15edea3f2640bc52bf Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Sun, 29 Oct 2023 16:34:06 +0000 Subject: [PATCH 15/76] super constructor for ExpansionSet --- FIAT/expansions.py | 44 +++++++++++++++++--------------------------- 1 file changed, 17 insertions(+), 27 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 43ffad478..7a806fc99 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -232,12 +232,21 @@ def __new__(cls, ref_el, *args, **kwargs): else: raise Exception("Unknown reference element type.") - def _tabulate_duffy(self, degree, pts): - raise NotImplementedError + def __init__(self, ref_el): + self.ref_el = ref_el + dim = ref_el.get_spatial_dimension() + self.base_ref_el = reference_element.default_simplex(dim) + v1 = ref_el.get_vertices() + v2 = self.base_ref_el.get_vertices() + self.A, self.b = reference_element.make_affine_mapping(v1, v2) + self.mapping = lambda x: numpy.dot(self.A, x) + self.b + self.scale = numpy.sqrt(numpy.linalg.det(self.A)) + self._dmats_cache = {} + + def _tabulate_duffy(self, n, pts): + raise NotImplementedError() def make_dmats(self, degree): - if not hasattr(self, "_dmats_cache"): - self._dmats_cache = {} cache = self._dmats_cache key = degree try: @@ -259,8 +268,7 @@ class PointExpansionSet(ExpansionSet): def __init__(self, ref_el): if ref_el.get_spatial_dimension() != 0: raise ValueError("Must have a point") - self.ref_el = ref_el - self.base_ref_el = reference_element.Point() + super(PointExpansionSet, self).__init__(ref_el) def get_num_members(self, n): return 1 @@ -286,13 +294,7 @@ class LineExpansionSet(ExpansionSet): def __init__(self, ref_el): if ref_el.get_spatial_dimension() != 1: raise Exception("Must have a line") - self.ref_el = ref_el - self.base_ref_el = reference_element.DefaultLine() - v1 = ref_el.get_vertices() - v2 = self.base_ref_el.get_vertices() - self.A, self.b = reference_element.make_affine_mapping(v1, v2) - self.mapping = lambda x: numpy.dot(self.A, x) + self.b - self.scale = numpy.sqrt(numpy.linalg.det(self.A)) + super(LineExpansionSet, self).__init__(ref_el) def get_num_members(self, n): return n + 1 @@ -351,13 +353,7 @@ class TriangleExpansionSet(ExpansionSet): def __init__(self, ref_el): if ref_el.get_spatial_dimension() != 2: raise Exception("Must have a triangle") - self.ref_el = ref_el - self.base_ref_el = reference_element.DefaultTriangle() - v1 = ref_el.get_vertices() - v2 = self.base_ref_el.get_vertices() - self.A, self.b = reference_element.make_affine_mapping(v1, v2) - self.mapping = lambda x: numpy.dot(self.A, x) + self.b -# self.scale = numpy.sqrt(numpy.linalg.det(self.A)) + super(TriangleExpansionSet, self).__init__(ref_el) def get_num_members(self, n): return (n + 1) * (n + 2) // 2 @@ -462,13 +458,7 @@ class TetrahedronExpansionSet(ExpansionSet): def __init__(self, ref_el): if ref_el.get_spatial_dimension() != 3: raise Exception("Must be a tetrahedron") - self.ref_el = ref_el - self.base_ref_el = reference_element.DefaultTetrahedron() - v1 = ref_el.get_vertices() - v2 = self.base_ref_el.get_vertices() - self.A, self.b = reference_element.make_affine_mapping(v1, v2) - self.mapping = lambda x: numpy.dot(self.A, x) + self.b - self.scale = numpy.sqrt(numpy.linalg.det(self.A)) + super(TetrahedronExpansionSet, self).__init__(ref_el) def get_num_members(self, n): return (n + 1) * (n + 2) * (n + 3) // 6 From 9064496c674d59c692126398a82e65144d744968 
Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Sun, 29 Oct 2023 17:17:00 +0000 Subject: [PATCH 16/76] Common Duffy tabulation --- FIAT/expansions.py | 84 +++++++++++++++++----------------------------- 1 file changed, 30 insertions(+), 54 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 7a806fc99..b1e8d46ca 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -230,7 +230,7 @@ def __new__(cls, ref_el, *args, **kwargs): elif ref_el.get_shape() == reference_element.TETRAHEDRON: return TetrahedronExpansionSet(ref_el) else: - raise Exception("Unknown reference element type.") + raise ValueError("Invalid reference element type.") def __init__(self, ref_el): self.ref_el = ref_el @@ -244,7 +244,35 @@ def __init__(self, ref_el): self._dmats_cache = {} def _tabulate_duffy(self, n, pts): - raise NotImplementedError() + from FIAT.polynomial_set import mis + dim = self.ref_el.get_spatial_dimension() + sd = self.get_num_members(n) + xi = numpy.transpose(numpy.dot(pts, self.A.T) + self.b) + eta = (lambda x: x, lambda x: x, eta_square, eta_cube)[dim](xi) + basis = [dubiner_1d(n, k, eta[k]) for k in range(dim)] + derivs = [dubiner_deriv_1d(n, k, eta[k]) for k in range(dim)] + alphas = mis(dim, 0) + mis(dim, 1) + tabulations = {} + for alpha in alphas: + V = [v if a == 0 else dv for a, v, dv in zip(alpha, basis, derivs)] + phi = V[0] + if dim >= 2: + phi1 = phi + phi = numpy.copy(V[1]) + for i in range(n + 1): + indices = [morton_index2(i, j) for j in range(n + 1 - i)] + phi[indices] *= phi1[i] + if dim >= 3: + phi2 = phi + phi = numpy.zeros((sd, V[0].shape[1]), dtype=V[0].dtype) + for i in range(n + 1): + for j in range(n + 1 - i): + Vij = phi2[morton_index2(i, j)] + for k in range(n + 1 - i - j): + phi[morton_index3(i, j, k)] = V[2][morton_index2(i + j, k)] * Vij + tabulations[alpha] = phi + duffy_chain_rule(self.A, eta, tabulations) + return tabulations def make_dmats(self, degree): cache = self._dmats_cache @@ -313,12 +341,6 @@ def tabulate(self, n, pts): else: return [] - def _tabulate_duffy(self, n, pts): - xi = numpy.dot(pts, self.A.T) + self.b - tabulations = {(0,): dubiner_1d(n, 0, xi), - (1,): dubiner_deriv_1d(n, 0, xi) * self.A[0][0]} - return tabulations - def tabulate_derivatives(self, n, pts): """Returns a tuple of length one (A,) such that A[i,j] = D phi_i(pts[j]). 
The tuple is returned for @@ -414,28 +436,6 @@ def _tabulate(self, n, pts): return results # return self.scale * results - def _tabulate_duffy(self, n, pts): - from FIAT.polynomial_set import mis - idx = morton_index2 - sd = self.get_num_members(n) - xi = numpy.transpose(numpy.dot(pts, self.A.T) + self.b) - eta = eta_square(xi) - dim = len(eta) - basis = [dubiner_1d(n, k, eta[k]) for k in range(dim)] - derivs = [dubiner_deriv_1d(n, k, eta[k]) for k in range(dim)] - alphas = mis(dim, 0) + mis(dim, 1) - tabulations = {} - for alpha in alphas: - V = [v if a == 0 else dv for a, v, dv in zip(alpha, basis, derivs)] - phi = numpy.zeros((sd, V[0].shape[1]), dtype=V[0].dtype) - for i in range(n + 1): - Vi = V[0][i] - for j in range(n + 1 - i): - phi[idx(i, j)] = V[1][morton_index2(i, j)] * Vi - tabulations[alpha] = phi - duffy_chain_rule(self.A, eta, tabulations) - return tabulations - def tabulate_derivatives(self, n, pts): order = 1 data = _tabulate_dpts(self._tabulate, 2, n, order, numpy.array(pts)) @@ -539,30 +539,6 @@ def _tabulate(self, n, pts): return results - def _tabulate_duffy(self, n, pts): - from FIAT.polynomial_set import mis - idx = morton_index3 - sd = self.get_num_members(n) - xi = numpy.transpose(numpy.dot(pts, self.A.T) + self.b) - eta = eta_cube(xi) - dim = len(eta) - basis = [dubiner_1d(n, k, eta[k]) for k in range(dim)] - derivs = [dubiner_deriv_1d(n, k, eta[k]) for k in range(dim)] - alphas = mis(dim, 0) + mis(dim, 1) - tabulations = {} - for alpha in alphas: - V = [v if a == 0 else dv for a, v, dv in zip(alpha, basis, derivs)] - phi = numpy.zeros((sd, V[0].shape[1]), dtype=V[0].dtype) - for i in range(n + 1): - Vi = V[0][i] - for j in range(n + 1 - i): - Vij = V[1][morton_index2(i, j)] * Vi - for k in range(n + 1 - i - j): - phi[idx(i, j, k)] = V[2][morton_index2(i + j, k)] * Vij - tabulations[alpha] = phi - duffy_chain_rule(self.A, eta, tabulations) - return tabulations - def tabulate_derivatives(self, n, pts): order = 1 D = 3 From 7cfb08332427762966df70b522f31f531f4be940 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Sun, 29 Oct 2023 22:15:52 +0000 Subject: [PATCH 17/76] DefaultPoint --- FIAT/polynomial_set.py | 4 ++-- FIAT/reference_element.py | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/FIAT/polynomial_set.py b/FIAT/polynomial_set.py index 4242b6266..d1ecf00c7 100644 --- a/FIAT/polynomial_set.py +++ b/FIAT/polynomial_set.py @@ -54,14 +54,14 @@ class PolynomialSet(object): function. 
""" - def __init__(self, ref_el, degree, embedded_degree, expansion_set, coeffs, dmats=None): + def __init__(self, ref_el, degree, embedded_degree, expansion_set, coeffs): self.ref_el = ref_el self.num_members = coeffs.shape[0] self.degree = degree self.embedded_degree = embedded_degree self.expansion_set = expansion_set self.coeffs = coeffs - self.dmats = dmats or [] + self.dmats = [] def tabulate_new(self, pts): return numpy.dot(self.coeffs, diff --git a/FIAT/reference_element.py b/FIAT/reference_element.py index 7a4f63e6e..5b2234cf8 100644 --- a/FIAT/reference_element.py +++ b/FIAT/reference_element.py @@ -1266,7 +1266,9 @@ def make_affine_mapping(xs, ys): def default_simplex(spatial_dim): """Factory function that maps spatial dimension to an instance of the default reference simplex of that dimension.""" - if spatial_dim == 1: + if spatial_dim == 0: + return Point() + elif spatial_dim == 1: return DefaultLine() elif spatial_dim == 2: return DefaultTriangle() From f3bacd1b98ab0f9fbf5bfd93fbd5cc900510c13c Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Sun, 29 Oct 2023 22:59:29 +0000 Subject: [PATCH 18/76] refactoring, some comments --- FIAT/expansions.py | 157 +++++++++++++++++++++++++-------------------- 1 file changed, 89 insertions(+), 68 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index b1e8d46ca..f40d5d322 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -25,74 +25,6 @@ def morton_index3(p, q, r): return (p + q + r)*(p + q + r + 1)*(p + q + r + 2)//6 + (q + r)*(q + r + 1)//2 + r -def dubiner_1d(order, dim, x): - if dim == 0: - scale = numpy.sqrt(0.5 + numpy.arange(order + 1)) - results = jacobi.eval_jacobi_batch(0, 0, order, x[:, None]) - return numpy.multiply(scale[:, None], results, out=results) - sd = (order + 1) * (order + 2) // 2 - phi = numpy.zeros((sd, x.size), dtype=x.dtype) - x1 = (1. - x) * 0.5 - for i in range(order + 1): - n = order - i - alpha = 2 * i + dim - results = jacobi.eval_jacobi_batch(alpha, 0, n, x[:, None]) - if i > 0: - results *= x1 ** i - scale = numpy.sqrt(0.5*(alpha + 1) + numpy.arange(n + 1)) - numpy.multiply(scale[:, None], results, out=results) - indices = [morton_index2(i, j) for j in range(n + 1)] - phi[indices, :] = results - return phi - - -def dubiner_deriv_1d(order, dim, x): - if dim == 0: - scale = numpy.sqrt(0.5 + numpy.arange(order + 1)) - results = jacobi.eval_jacobi_deriv_batch(0, 0, order, x[:, None]) - return numpy.multiply(scale[:, None], results, out=results) - sd = (order + 1) * (order + 2) // 2 - dphi = numpy.zeros((sd, x.size), dtype=x.dtype) - x1 = (1. - x) * 0.5 - for i in range(order + 1): - n = order - i - alpha = 2 * i + dim - derivs = jacobi.eval_jacobi_deriv_batch(alpha, 0, n, x[:, None]) - if i > 0: - results = jacobi.eval_jacobi_batch(alpha, 0, n, x[:, None]) - derivs *= x1 - derivs += results * (-0.5 * i) - if i > 1: - derivs *= x1 ** (i - 1) - scale = numpy.sqrt(0.5*(alpha + 1) + numpy.arange(n + 1)) - numpy.multiply(scale[:, None], derivs, out=derivs) - indices = [morton_index2(i, j) for j in range(n + 1)] - dphi[indices, :] = derivs - return dphi - - -def duffy_chain_rule(A, eta, tabulations): - dim = len(eta) - dphi_dxi = [tabulations[alpha] for alpha in sorted(tabulations, reverse=True) if sum(alpha) == 1] - if len(dphi_dxi) < dim: - return - eta1 = [(1. - x) * 0.5 for x in eta] - for i in range(dim): - for j in range(i): - Jij = -0.5 * (1. 
+ eta[j])
-            for k in range(j + 1, dim):
-                if k != i:
-                    Jij *= eta1[k]
-            dphi_dxi[i] -= dphi_dxi[j] * Jij
-        for j in range(i + 1, dim):
-            dphi_dxi[i] /= eta1[j]
-    j = 0
-    for alpha in sorted(tabulations, reverse=True):
-        if sum(alpha) == 1:
-            tabulations[alpha] = sum(dphi_dxi[i] * A[i][j] for i in range(dim))
-            j += 1
-
-
 def jrc(a, b, n):
     an = (2*n+1+a+b)*(2*n+2+a+b) / (2*(n+1)*(n+1+a+b))
     bn = (a*a-b*b) * (2*n+1+a+b) / (2*(n+1)*(2*n+a+b)*(n+1+a+b))
@@ -213,6 +145,86 @@ def eta_cube(xi):
     return eta1, eta2, eta3
 
 
+def dubiner_1d(order, dim, x):
+    """Returns a tabulation of the orthonormal Dubiner 1D polynomials, defined as
+    c_i P_i^(0,0)(x) if dim == 0,
+    c_ij (1-x)^i P_j^(2i + dim, 0)(x) otherwise."""
+    if dim == 0:
+        scale = numpy.sqrt(0.5 + numpy.arange(order + 1))
+        results = jacobi.eval_jacobi_batch(0, 0, order, x[:, None])
+        return numpy.multiply(scale[:, None], results, out=results)
+    sd = (order + 1) * (order + 2) // 2
+    phi = numpy.zeros((sd, x.size), dtype=x.dtype)
+    x1 = (1. - x) * 0.5
+    for i in range(order + 1):
+        n = order - i
+        alpha = 2 * i + dim
+        results = jacobi.eval_jacobi_batch(alpha, 0, n, x[:, None])
+        if i > 0:
+            results *= x1 ** i
+        scale = numpy.sqrt(0.5*(alpha + 1) + numpy.arange(n + 1))
+        numpy.multiply(scale[:, None], results, out=results)
+        indices = [morton_index2(i, j) for j in range(n + 1)]
+        phi[indices, :] = results
+    return phi
+
+
+def dubiner_deriv_1d(order, dim, x):
+    """Returns a tabulation of the first derivatives of the orthonormal Dubiner
+    1D polynomials."""
+    if dim == 0:
+        scale = numpy.sqrt(0.5 + numpy.arange(order + 1))
+        results = jacobi.eval_jacobi_deriv_batch(0, 0, order, x[:, None])
+        return numpy.multiply(scale[:, None], results, out=results)
+    sd = (order + 1) * (order + 2) // 2
+    dphi = numpy.zeros((sd, x.size), dtype=x.dtype)
+    x1 = (1. - x) * 0.5
+    for i in range(order + 1):
+        n = order - i
+        alpha = 2 * i + dim
+        derivs = jacobi.eval_jacobi_deriv_batch(alpha, 0, n, x[:, None])
+        if i > 0:
+            results = jacobi.eval_jacobi_batch(alpha, 0, n, x[:, None])
+            derivs *= x1
+            derivs += results * (-0.5 * i)
+            if i > 1:
+                derivs *= x1 ** (i - 1)
+        scale = numpy.sqrt(0.5*(alpha + 1) + numpy.arange(n + 1))
+        numpy.multiply(scale[:, None], derivs, out=derivs)
+        indices = [morton_index2(i, j) for j in range(n + 1)]
+        dphi[indices, :] = derivs
+    return dphi
+
+
+def duffy_chain_rule(A, eta, tabulations):
+    """Applies the chain rule associated with an affine transformation onto the
+    default simplex on (-1, 1), followed by the Duffy transformation onto [-1, 1]^d.
+    A: the Jacobian of the affine transformation
+    eta: the points in [-1, 1]^d
+    tabulations: On entry, the tabulations of the reference gradient with
+    respect to eta. On exit, the tabulations of the gradient in physical space.
+    """
+    dim = len(eta)
+    dphi_dxi = [tabulations[alpha] for alpha in sorted(tabulations, reverse=True) if sum(alpha) == 1]
+    if len(dphi_dxi) < dim:
+        return
+    eta1 = [(1. - x) * 0.5 for x in eta]
+    for i in range(dim):
+        for j in range(i):
+            Jij = -0.5 * (1. 
+ eta[j]) + for k in range(j + 1, dim): + if k != i: + Jij *= eta1[k] + dphi_dxi[i] -= dphi_dxi[j] * Jij + for j in range(i + 1, dim): + dphi_dxi[i] /= eta1[j] + j = 0 + for alpha in sorted(tabulations, reverse=True): + if sum(alpha) == 1: + tabulations[alpha] = sum(dphi_dxi[i] * A[i][j] for i in range(dim)) + j += 1 + + class ExpansionSet(object): point_set = RecursivePointSet(lambda n: GaussLegendreQuadratureLineRule(UFCInterval(), n + 1).get_points()) @@ -244,6 +256,11 @@ def __init__(self, ref_el): self._dmats_cache = {} def _tabulate_duffy(self, n, pts): + """Returns a dict of tabulations of phi_i(pts[j]) and each component of + the gradient d/dx_k phi_i(pts[j]). Here we employ the Duffy transform, + and thus this tabulation mode is only recommended for use with interior + points. + """ from FIAT.polynomial_set import mis dim = self.ref_el.get_spatial_dimension() sd = self.get_num_members(n) @@ -275,6 +292,10 @@ def _tabulate_duffy(self, n, pts): return tabulations def make_dmats(self, degree): + """Returns a numpy array with the expansion coefficients dmat[k, j, i] + of the gradient of each member of the expansion set: + d/dx_k phi_j = sum_i dmat[k, j, i] phi_i. + """ cache = self._dmats_cache key = degree try: From 02dcf7a1d21b23e5802bbbdc3f2c69566706eca4 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Mon, 30 Oct 2023 10:54:37 +0000 Subject: [PATCH 19/76] move derivative tabulation from PolynomialSet to ExpansionSet --- FIAT/expansions.py | 19 ++++++++++++++++++- FIAT/polynomial_set.py | 30 +++--------------------------- 2 files changed, 21 insertions(+), 28 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index f40d5d322..42c2a33af 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -268,8 +268,8 @@ def _tabulate_duffy(self, n, pts): eta = (lambda x: x, lambda x: x, eta_square, eta_cube)[dim](xi) basis = [dubiner_1d(n, k, eta[k]) for k in range(dim)] derivs = [dubiner_deriv_1d(n, k, eta[k]) for k in range(dim)] - alphas = mis(dim, 0) + mis(dim, 1) tabulations = {} + alphas = mis(dim, 0) + mis(dim, 1) for alpha in alphas: V = [v if a == 0 else dv for a, v, dv in zip(alpha, basis, derivs)] phi = V[0] @@ -311,6 +311,23 @@ def make_dmats(self, degree): dmats = numpy.linalg.solve(v, dv) return cache.setdefault(key, dmats) + def _tabulate_jet(self, degree, pts, order=0): + from FIAT.polynomial_set import mis + result = {} + base_vals = self.tabulate(degree, pts) + dmats = self.make_dmats(degree) if order > 0 else [] + for i in range(order + 1): + alphas = mis(self.ref_el.get_spatial_dimension(), i) + for alpha in alphas: + beta = next((beta for beta in sorted(result, reverse=True) + if all(bj <= aj for bj, aj in zip(beta, alpha))), (0,) * len(alpha)) + vals = base_vals if sum(beta) == 0 else result[beta] + for dmat, start, end in zip(dmats, beta, alpha): + for j in range(start, end): + vals = numpy.dot(dmat.T, vals) + result[alpha] = vals + return result + class PointExpansionSet(ExpansionSet): """Evaluates the point basis on a point reference element.""" diff --git a/FIAT/polynomial_set.py b/FIAT/polynomial_set.py index d1ecf00c7..502675cda 100644 --- a/FIAT/polynomial_set.py +++ b/FIAT/polynomial_set.py @@ -69,23 +69,9 @@ def tabulate_new(self, pts): def tabulate(self, pts, jet_order=0): """Returns the values of the polynomial set.""" - result = {} - base_vals = self.expansion_set.tabulate(self.embedded_degree, pts) - dmats = self.get_dmats() if jet_order > 0 else self.dmats - for i in range(jet_order + 1): - alphas = 
mis(self.ref_el.get_spatial_dimension(), i)
-            for alpha in alphas:
-                if sum(alpha) == 0:
-                    D = numpy.eye(len(base_vals))
-                elif len(dmats) > 0:
-                    D = form_matrix_product(dmats, alpha)
-                else:
-                    # special for vertex without defined point location
-                    assert pts == [()]
-                    D = numpy.eye(1)
-                result[alpha] = numpy.dot(self.coeffs,
-                                          numpy.dot(numpy.transpose(D),
-                                                    base_vals))
+        result = self.expansion_set._tabulate_jet(self.embedded_degree, pts, order=jet_order)
+        for alpha in result:
+            result[alpha] = numpy.dot(self.coeffs, result[alpha])
         return result
 
     def get_expansion_set(self):
@@ -175,16 +161,6 @@ def project(f, U, Q):
     return coeffs
 
 
-def form_matrix_product(mats, alpha):
-    """Forms product over mats[i]**alpha[i]"""
-    m = mats[0].shape[0]
-    result = numpy.eye(m)
-    for i in range(len(alpha)):
-        for j in range(alpha[i]):
-            result = numpy.dot(mats[i], result)
-    return result
-
-
 def polynomial_set_union_normalized(A, B):
     """Given polynomial sets A and B, constructs a new polynomial
     set whose span is the same as that of span(A) union span(B).  It may

From c0067a7359e9748609ff6c3e73a9b717f4b936aa Mon Sep 17 00:00:00 2001
From: Pablo Brubeck
Date: Tue, 31 Oct 2023 10:45:28 +0000
Subject: [PATCH 20/76] restore form_matrix_product

---
 FIAT/polynomial_set.py | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/FIAT/polynomial_set.py b/FIAT/polynomial_set.py
index 502675cda..cfde1a84d 100644
--- a/FIAT/polynomial_set.py
+++ b/FIAT/polynomial_set.py
@@ -161,6 +161,16 @@ def project(f, U, Q):
     return coeffs
 
 
+def form_matrix_product(mats, alpha):
+    """Forms product over mats[i]**alpha[i]"""
+    m = mats[0].shape[0]
+    result = numpy.eye(m)
+    for i in range(len(alpha)):
+        for j in range(alpha[i]):
+            result = numpy.dot(mats[i], result)
+    return result
+
+
 def polynomial_set_union_normalized(A, B):
     """Given polynomial sets A and B, constructs a new polynomial
     set whose span is the same as that of span(A) union span(B).  It may

From 220f221cc3a8e9d5357f54b5876b3de5c3f6deff Mon Sep 17 00:00:00 2001
From: Pablo Brubeck
Date: Tue, 31 Oct 2023 15:29:39 +0000
Subject: [PATCH 21/76] Hand-written differentiation of recurrence relations

---
 FIAT/expansions.py | 200 ++++++++++++++++++++++++++------------------
 1 file changed, 116 insertions(+), 84 deletions(-)

diff --git a/FIAT/expansions.py b/FIAT/expansions.py
index 42c2a33af..10c3c4fc5 100644
--- a/FIAT/expansions.py
+++ b/FIAT/expansions.py
@@ -407,6 +407,95 @@ def tabulate_derivatives(self, n, pts):
         return dv
 
 
+def recurrence(n, x, factors, phi, dx=None, dfactors=None, dphi=None):
+    dim = len(x)
+    if dim == 2:
+        idx = morton_index2
+    elif dim == 3:
+        idx = lambda p, q: morton_index3(p, q, 0)
+    else:
+        raise ValueError("Invalid number of spatial dimensions")
+    f1, f2, f3, f4, f5 = factors
+    if dfactors is not None:
+        df1, df2, df3, df4, df5 = dfactors
+
+    # p = 1
+    phi[idx(1, 0)] = f1
+    if dphi is not None:
+        dphi[idx(1, 0)] = df1
+
+    # general p by recurrence
+    for p in range(1, n):
+        icur = idx(p, 0)
+        inext = idx(p + 1, 0)
+        iprev = idx(p - 1, 0)
+        a = (2. * p + 1.) / (1. + p)
+        b = p / (1. 
+ p) + phi[inext] = a * f1 * phi[icur] - b * f2 * phi[iprev] + if dphi is None: + continue + dphi[inext] = a * f1 * dphi[icur] - b * f2 * dphi[iprev] \ + + a * phi[icur] * df1 - b * phi[iprev] * df2 + + # q = 1 + for p in range(n): + icur = idx(p, 0) + inext = idx(p, 1) + g = (p + 0.5) * (1 + x[1]) + f3 + phi[inext] = g * phi[icur] + if dphi is None: + continue + dg = (p + 0.5) * dx[1] + df3 + dphi[inext] = g * dphi[icur] + phi[icur] * dg + + # general q by recurrence + for p in range(n - 1): + for q in range(1, n - p): + icur = idx(p, q) + inext = idx(p, q + 1) + iprev = idx(p, q - 1) + aq, bq, cq = jrc(2 * p + 1, 0, q) + g = aq * f3 + bq * f4 + h = cq * f5 + phi[inext] = g * phi[icur] - h * phi[iprev] + if dphi is None: + continue + dg = aq * df3 + bq * df4 + dh = cq * df5 + dphi[inext] = g * dphi[icur] + phi[icur] * dg - h * dphi[iprev] - phi[iprev] * dh + + if dim < 3: + return + idx = morton_index3 + + # r = 1 + for p in range(n): + for q in range(n - p): + icur = idx(p, q, 0) + inext = idx(p, q, 1) + a = 2.0 + p + q + b = 1.0 + p + q + g = a * x[2] + b + phi[inext] = g * phi[icur] + if dphi is None: + continue + dg = a * dx[2] + dphi[inext] = g * dphi[icur] + phi[icur] * dg + + # general r by recurrence + for p in range(n - 1): + for q in range(0, n - p - 1): + for r in range(1, n - p - q): + icur = idx(p, q, r) + inext = idx(p, q, r + 1) + iprev = idx(p, q, r - 1) + ar, br, cr = jrc(2 * p + 2 * q + 2, 0, r) + phi[inext] = (ar * x[2] + br) * phi[icur] - cr * phi[iprev] + if dphi is None: + continue + dphi[inext] = (ar * x[2] + br) * dphi[icur] + ar * phi[icur] * dx[2] - cr * dphi[iprev] + + class TriangleExpansionSet(ExpansionSet): """Evaluates the orthonormal Dubiner basis on a triangular reference element.""" @@ -427,52 +516,29 @@ def tabulate(self, n, pts): def _tabulate(self, n, pts): '''A version of tabulate() that also works for a single point. ''' - m1, m2 = self.A.shape - ref_pts = [sum(self.A[i][j] * pts[j] for j in range(m2)) + self.b[i] - for i in range(m1)] - - idx = morton_index2 results = ((n + 1) * (n + 2) // 2) * [None] - - results[0] = 1.0 \ - + pts[0] - pts[0] \ - + pts[1] - pts[1] - + results[0] = 1. + pts[0] - pts[0] if n == 0: return results + m1, m2 = self.A.shape + ref_pts = [sum((self.A[i][j] * pts[j] for j in range(m2)), self.b[i]) + for i in range(m1)] x = ref_pts[0] y = ref_pts[1] - f1 = (1.0 + 2 * x + y) / 2.0 - f2 = (1.0 - y) / 2.0 - f3 = f2**2 - - results[idx(1, 0)] = f1 - - for p in range(1, n): - a = (2.0 * p + 1) / (1.0 + p) - # b = p / (p+1.0) - results[idx(p+1, 0)] = a * f1 * results[idx(p, 0)] \ - - p/(1.0+p) * f3 * results[idx(p-1, 0)] - - for p in range(n): - results[idx(p, 1)] = 0.5 * (1+2.0*p+(3.0+2.0*p)*y) \ - * results[idx(p, 0)] - - for p in range(n - 1): - for q in range(1, n - p): - (a1, a2, a3) = jrc(2 * p + 1, 0, q) - results[idx(p, q+1)] = \ - (a1 * y + a2) * results[idx(p, q)] \ - - a3 * results[idx(p, q-1)] - + factors = [(1. + 2 * x + y) / 2., + (y - 1.) ** 2 / 4., + y, + y, 1.] + recurrence(n, ref_pts, factors, results) + idx = morton_index2 for p in range(n + 1): for q in range(n - p + 1): - results[idx(p, q)] *= math.sqrt((p + 0.5) * (p + q + 1.0)) - + icur = idx(p, q) + a = math.sqrt((p + 0.5) * (p + q + 1.0)) + results[icur] *= a return results - # return self.scale * results def tabulate_derivatives(self, n, pts): order = 1 @@ -510,11 +576,6 @@ def tabulate(self, n, pts): def _tabulate(self, n, pts): '''A version of tabulate() that also works for a single point. 
''' - m1, m2 = self.A.shape - ref_pts = [sum(self.A[i][j] * pts[j] for j in range(m2)) + self.b[i] - for i in range(m1)] - - idx = morton_index3 results = ((n + 1) * (n + 2) * (n + 3) // 6) * [None] results[0] = 1.0 \ + pts[0] - pts[0] \ @@ -524,57 +585,28 @@ def _tabulate(self, n, pts): if n == 0: return results + m1, m2 = self.A.shape + ref_pts = [sum(self.A[i][j] * pts[j] for j in range(m2)) + self.b[i] + for i in range(m1)] x = ref_pts[0] y = ref_pts[1] z = ref_pts[2] - factor1 = 0.5 * (2.0 + 2.0 * x + y + z) - factor2 = (0.5 * (y + z))**2 - factor3 = 0.5 * (1 + 2.0 * y + z) - factor4 = 0.5 * (1 - z) - factor5 = factor4**2 - - results[idx(1, 0, 0)] = factor1 - for p in range(1, n): - a1 = (2.0 * p + 1.0) / (p + 1.0) - a2 = p / (p + 1.0) - results[idx(p+1, 0, 0)] = a1 * factor1 * results[idx(p, 0, 0)] \ - - a2 * factor2 * results[idx(p-1, 0, 0)] - - # q = 1 - for p in range(0, n): - results[idx(p, 1, 0)] = results[idx(p, 0, 0)] \ - * (p * (1.0 + y) + (2.0 + 3.0 * y + z) / 2) - - for p in range(0, n - 1): - for q in range(1, n - p): - (aq, bq, cq) = jrc(2 * p + 1, 0, q) - qmcoeff = aq * factor3 + bq * factor4 - qm1coeff = cq * factor5 - results[idx(p, q+1, 0)] = qmcoeff * results[idx(p, q, 0)] \ - - qm1coeff * results[idx(p, q-1, 0)] - - # now handle r=1 - for p in range(n): - for q in range(n - p): - results[idx(p, q, 1)] = results[idx(p, q, 0)] \ - * (1.0 + p + q + (2.0 + q + p) * z) - - # general r by recurrence - for p in range(n - 1): - for q in range(0, n - p - 1): - for r in range(1, n - p - q): - ar, br, cr = jrc(2 * p + 2 * q + 2, 0, r) - results[idx(p, q, r+1)] = \ - (ar * z + br) * results[idx(p, q, r)] \ - - cr * results[idx(p, q, r-1)] + factors = [0.5 * (2.0 + 2.0 * x + y + z), + (0.5 * (y + z))**2, + 0.5 * (1 + 2.0 * y + z), + 0.5 * (1 - z)] + factors.append(factors[3]**2) + + recurrence(n, ref_pts, factors, results) + idx = morton_index3 for p in range(n + 1): for q in range(n - p + 1): for r in range(n - p - q + 1): - results[idx(p, q, r)] *= \ - math.sqrt((p+0.5)*(p+q+1.0)*(p+q+r+1.5)) - + icur = idx(p, q, r) + a = math.sqrt((p+0.5)*(p+q+1.0)*(p+q+r+1.5)) + results[icur] *= a return results def tabulate_derivatives(self, n, pts): From 2dd86705aa9543eed8ba1fad2908432d80c04363 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Tue, 31 Oct 2023 19:01:22 +0000 Subject: [PATCH 22/76] derivative recursion, but wrong result --- FIAT/expansions.py | 330 ++++++++++++++++++++++++++------------------- 1 file changed, 192 insertions(+), 138 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 10c3c4fc5..6a3732d6d 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -17,11 +17,11 @@ from FIAT.recursive_points import RecursivePointSet -def morton_index2(i, j): +def morton_index2(i, j=0): return (i + j) * (i + j + 1) // 2 + j -def morton_index3(p, q, r): +def morton_index3(p, q=0, r=0): return (p + q + r)*(p + q + r + 1)*(p + q + r + 2)//6 + (q + r)*(q + r + 1)//2 + r @@ -225,6 +225,103 @@ def duffy_chain_rule(A, eta, tabulations): j += 1 +def recurrence(dim, n, factors, phi, dfactors=None, dphi=None): + if dim == 1: + idx = lambda p: p + elif dim == 2: + idx = morton_index2 + elif dim == 3: + idx = morton_index3 + else: + raise ValueError("Invalid number of spatial dimensions") + + skip_derivs = dphi is None + f1, f2, f3, f4 = factors + f5 = f4 ** 2 + if dfactors is not None: + df1, df2, df3, df4 = dfactors + df5 = 2 * f4 * df4 + + # p = 1 + phi[idx(1)] = f1 + if dphi is not None: + dphi[idx(1)] = df1 + + # general p by recurrence + for p in range(1, n): + 
icur = idx(p) + inext = idx(p + 1) + iprev = idx(p - 1) + a = (2. * p + 1.) / (1. + p) + b = p / (1. + p) + phi[inext] = a * f1 * phi[icur] - b * f2 * phi[iprev] + if skip_derivs: + continue + dphi[inext] = a * f1 * dphi[icur] - b * f2 * dphi[iprev] \ + + a * phi[icur] * df1 - b * phi[iprev] * df2 + if dim < 2: + return + + # q = 1 + for p in range(n): + icur = idx(p, 0) + inext = idx(p, 1) + g = (p + 1.5) * f3 - f4 + phi[inext] = g * phi[icur] + if dphi is None: + continue + dg = (p + 1.5) * df3 - df4 + dphi[inext] = g * dphi[icur] + phi[icur] * dg + + # general q by recurrence + for p in range(n - 1): + for q in range(1, n - p): + icur = idx(p, q) + inext = idx(p, q + 1) + iprev = idx(p, q - 1) + aq, bq, cq = jrc(2 * p + 1, 0, q) + g = aq * f3 + (bq - aq) * f4 + h = cq * f5 + phi[inext] = g * phi[icur] - h * phi[iprev] + if skip_derivs: + continue + dg = aq * df3 + (bq - aq) * df4 + dh = cq * df5 + dphi[inext] = g * dphi[icur] + phi[icur] * dg - h * dphi[iprev] - phi[iprev] * dh + if dim < 3: + return + + z = 1 - 2 * f4 + if dfactors: + dz = -2 * df4 + # r = 1 + for p in range(n): + for q in range(n - p): + icur = idx(p, q, 0) + inext = idx(p, q, 1) + a = 2.0 + p + q + b = 1.0 + p + q + g = a * z + b + phi[inext] = g * phi[icur] + if dphi is None: + continue + dg = a * dz + dphi[inext] = g * dphi[icur] + phi[icur] * dg + + # general r by recurrence + for p in range(n - 1): + for q in range(0, n - p - 1): + for r in range(1, n - p - q): + icur = idx(p, q, r) + inext = idx(p, q, r + 1) + iprev = idx(p, q, r - 1) + ar, br, cr = jrc(2 * p + 2 * q + 2, 0, r) + phi[inext] = (ar * z + br) * phi[icur] - cr * phi[iprev] + if skip_derivs: + continue + dphi[inext] = (ar * z + br) * dphi[icur] + ar * phi[icur] * dz - cr * dphi[iprev] + + class ExpansionSet(object): point_set = RecursivePointSet(lambda n: GaussLegendreQuadratureLineRule(UFCInterval(), n + 1).get_points()) @@ -255,6 +352,43 @@ def __init__(self, ref_el): self.scale = numpy.sqrt(numpy.linalg.det(self.A)) self._dmats_cache = {} + def _tabulate(self, n, pts): + '''A version of tabulate() that also works for a single point. + ''' + dim = self.ref_el.get_spatial_dimension() + results = [None] * self.get_num_members(n) + results[0] = sum((pts[i] - pts[i] for i in range(dim)), 1.) + if n == 0: + return results + m1, m2 = self.A.shape + ref_pts = [sum((self.A[i][j] * pts[j] for j in range(m2)), self.b[i]) + for i in range(m1)] + recurrence(dim, n, self._make_factors(ref_pts), results) + self._normalize(n, results) + return results + + def _tabulate_derivatives(self, n, pts): + '''A version of tabulate_derivatives() that also works for a single point. + ''' + dim = self.ref_el.get_spatial_dimension() + phi = [None] * self.get_num_members(n) + dphi = [None] * self.get_num_members(n) + phi[0] = sum((pts[i] - pts[i] for i in range(dim)), 1.) + dphi[0] = pts - pts + if n == 0: + return phi, dphi + m1, m2 = self.A.shape + ref_pts = [sum((self.A[i][j] * pts[j] for j in range(m2)), self.b[i]) + for i in range(m1)] + + ref_pts = numpy.array(ref_pts) + factors = self._make_factors(ref_pts) + dfactors = self._make_dfactors(ref_pts) + recurrence(dim, n, factors, phi, dfactors=dfactors, dphi=dphi) + self._normalize(n, phi) + self._normalize(n, dphi) + return phi, dphi + def _tabulate_duffy(self, n, pts): """Returns a dict of tabulations of phi_i(pts[j]) and each component of the gradient d/dx_k phi_i(pts[j]). 
Here we employ the Duffy transform, @@ -305,10 +439,15 @@ def make_dmats(self, degree): if degree == 0: return cache.setdefault(key, numpy.zeros((self.ref_el.get_spatial_dimension(), 1, 1), "d")) pts = self.point_set.recursive_points(self.ref_el.get_vertices(), degree) + tab = self._tabulate_duffy(degree, pts) v, = [tab[alpha].T for alpha in tab if sum(alpha) == 0] dv = numpy.stack([tab[alpha].T for alpha in sorted(tab, reverse=True) if sum(alpha) == 1]) dmats = numpy.linalg.solve(v, dv) + + # v, dv = self._tabulate_derivatives(degree, numpy.transpose(pts)) + # dv = numpy.array(dv).transpose((1, 2, 0)) + # dmats = numpy.linalg.solve(numpy.transpose(v), dv) return cache.setdefault(key, dmats) def _tabulate_jet(self, degree, pts, order=0): @@ -365,6 +504,18 @@ def __init__(self, ref_el): def get_num_members(self, n): return n + 1 + def _make_factors(self, ref_pts): + return [ref_pts[0], 1., 0., 0.] + + def _make_dfactors(self, ref_pts): + dx = ref_pts - ref_pts + dx[..., 0] = 1. + return [dx, 0.*dx, 0.*dx, 0.*dx] + + def _normalize(self, n, phi): + for p in range(n + 1): + phi[p] *= math.sqrt(p + 0.5) + def tabulate(self, n, pts): """Returns a numpy array A[i,j] = phi_i(pts[j])""" if len(pts) > 0: @@ -407,95 +558,6 @@ def tabulate_derivatives(self, n, pts): return dv -def recurrence(n, x, factors, phi, dx=None, dfactors=None, dphi=None): - dim = len(x) - if dim == 2: - idx = morton_index2 - elif dim == 3: - idx = lambda p, q: morton_index3(p, q, 0) - else: - raise ValueError("Invalid number of spatial dimensions") - f1, f2, f3, f4, f5 = factors - if dfactors is not None: - df1, df2, df3, df4, df5 = dfactors - - # p = 1 - phi[idx(1, 0)] = f1 - if dphi is not None: - dphi[idx(1, 0)] = df1 - - # general p by recurrence - for p in range(1, n): - icur = idx(p, 0) - inext = idx(p + 1, 0) - iprev = idx(p - 1, 0) - a = (2. * p + 1.) / (1. + p) - b = p / (1. 
+ p) - phi[inext] = a * f1 * phi[icur] - b * f2 * phi[iprev] - if dphi is None: - continue - dphi[inext] = a * f1 * dphi[icur] - b * f2 * dphi[iprev] \ - + a * phi[icur] * df1 - b * phi[iprev] * df2 - - # q = 1 - for p in range(n): - icur = idx(p, 0) - inext = idx(p, 1) - g = (p + 0.5) * (1 + x[1]) + f3 - phi[inext] = g * phi[icur] - if dphi is None: - continue - dg = (p + 0.5) * dx[1] + df3 - dphi[inext] = g * dphi[icur] + phi[icur] * dg - - # general q by recurrence - for p in range(n - 1): - for q in range(1, n - p): - icur = idx(p, q) - inext = idx(p, q + 1) - iprev = idx(p, q - 1) - aq, bq, cq = jrc(2 * p + 1, 0, q) - g = aq * f3 + bq * f4 - h = cq * f5 - phi[inext] = g * phi[icur] - h * phi[iprev] - if dphi is None: - continue - dg = aq * df3 + bq * df4 - dh = cq * df5 - dphi[inext] = g * dphi[icur] + phi[icur] * dg - h * dphi[iprev] - phi[iprev] * dh - - if dim < 3: - return - idx = morton_index3 - - # r = 1 - for p in range(n): - for q in range(n - p): - icur = idx(p, q, 0) - inext = idx(p, q, 1) - a = 2.0 + p + q - b = 1.0 + p + q - g = a * x[2] + b - phi[inext] = g * phi[icur] - if dphi is None: - continue - dg = a * dx[2] - dphi[inext] = g * dphi[icur] + phi[icur] * dg - - # general r by recurrence - for p in range(n - 1): - for q in range(0, n - p - 1): - for r in range(1, n - p - q): - icur = idx(p, q, r) - inext = idx(p, q, r + 1) - iprev = idx(p, q, r - 1) - ar, br, cr = jrc(2 * p + 2 * q + 2, 0, r) - phi[inext] = (ar * x[2] + br) * phi[icur] - cr * phi[iprev] - if dphi is None: - continue - dphi[inext] = (ar * x[2] + br) * dphi[icur] + ar * phi[icur] * dx[2] - cr * dphi[iprev] - - class TriangleExpansionSet(ExpansionSet): """Evaluates the orthonormal Dubiner basis on a triangular reference element.""" @@ -513,32 +575,30 @@ def tabulate(self, n, pts): else: return numpy.array(self._tabulate(n, numpy.array(pts).T)) - def _tabulate(self, n, pts): - '''A version of tabulate() that also works for a single point. - ''' - results = ((n + 1) * (n + 2) // 2) * [None] - results[0] = 1. + pts[0] - pts[0] - if n == 0: - return results - - m1, m2 = self.A.shape - ref_pts = [sum((self.A[i][j] * pts[j] for j in range(m2)), self.b[i]) - for i in range(m1)] + def _make_factors(self, ref_pts): x = ref_pts[0] y = ref_pts[1] + return [0.5 * (1. + 2. * x + y), + (0.5 * (1. - y)) ** 2, + 1. + y, + 1.] - factors = [(1. + 2 * x + y) / 2., - (y - 1.) ** 2 / 4., - y, - y, 1.] - recurrence(n, ref_pts, factors, results) + def _make_dfactors(self, ref_pts): + y = ref_pts[1] + dx = ref_pts - ref_pts + dy = ref_pts - ref_pts + dx[..., 0] = 1. + dy[..., 1] = 1. + return [dx + dy, + 0.5 * y * dy, + dy, + 0 * dx] + + def _normalize(self, n, phi): idx = morton_index2 for p in range(n + 1): for q in range(n - p + 1): - icur = idx(p, q) - a = math.sqrt((p + 0.5) * (p + q + 1.0)) - results[icur] *= a - return results + phi[idx(p, q)] *= math.sqrt((p + 0.5) * (p + q + 1.0)) def tabulate_derivatives(self, n, pts): order = 1 @@ -573,41 +633,35 @@ def tabulate(self, n, pts): else: return numpy.array(self._tabulate(n, numpy.array(pts).T)) - def _tabulate(self, n, pts): - '''A version of tabulate() that also works for a single point. 
- ''' - results = ((n + 1) * (n + 2) * (n + 3) // 6) * [None] - results[0] = 1.0 \ - + pts[0] - pts[0] \ - + pts[1] - pts[1] \ - + pts[2] - pts[2] - - if n == 0: - return results - - m1, m2 = self.A.shape - ref_pts = [sum(self.A[i][j] * pts[j] for j in range(m2)) + self.b[i] - for i in range(m1)] + def _make_factors(self, ref_pts): x = ref_pts[0] y = ref_pts[1] z = ref_pts[2] + return [0.5 * (2. + 2. * x + y + z), + (0.5 * (y + z))**2, + 1. + y, + 0.5 * (1. - z)] - factors = [0.5 * (2.0 + 2.0 * x + y + z), - (0.5 * (y + z))**2, - 0.5 * (1 + 2.0 * y + z), - 0.5 * (1 - z)] - factors.append(factors[3]**2) - - recurrence(n, ref_pts, factors, results) - + def _make_dfactors(self, ref_pts): + y = ref_pts[1] + z = ref_pts[2] + dx = ref_pts - ref_pts + dy = ref_pts - ref_pts + dz = ref_pts - ref_pts + dx[..., 0] = 1. + dy[..., 1] = 1. + dz[..., 2] = 1. + return [dx + 0.5 * dy + 0.5 * dz, + 0.5 * (y + z) * (dy + dz), + dy, + -0.5 * dz] + + def _normalize(self, n, phi): idx = morton_index3 for p in range(n + 1): for q in range(n - p + 1): for r in range(n - p - q + 1): - icur = idx(p, q, r) - a = math.sqrt((p+0.5)*(p+q+1.0)*(p+q+r+1.5)) - results[icur] *= a - return results + phi[idx(p, q, r)] *= math.sqrt((p + 0.5) * (p + q + 1.0) * (p + q + r +1.5)) def tabulate_derivatives(self, n, pts): order = 1 From bac180c6e999b7c2a2951abbf9ccd0de19d92755 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Tue, 31 Oct 2023 22:35:40 +0000 Subject: [PATCH 23/76] Derivative recurrence passing tests --- FIAT/expansions.py | 33 +++++++++++---------------------- 1 file changed, 11 insertions(+), 22 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 6a3732d6d..4f9f0c8a2 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -440,14 +440,9 @@ def make_dmats(self, degree): return cache.setdefault(key, numpy.zeros((self.ref_el.get_spatial_dimension(), 1, 1), "d")) pts = self.point_set.recursive_points(self.ref_el.get_vertices(), degree) - tab = self._tabulate_duffy(degree, pts) - v, = [tab[alpha].T for alpha in tab if sum(alpha) == 0] - dv = numpy.stack([tab[alpha].T for alpha in sorted(tab, reverse=True) if sum(alpha) == 1]) - dmats = numpy.linalg.solve(v, dv) - - # v, dv = self._tabulate_derivatives(degree, numpy.transpose(pts)) - # dv = numpy.array(dv).transpose((1, 2, 0)) - # dmats = numpy.linalg.solve(numpy.transpose(v), dv) + v, dv = self._tabulate_derivatives(degree, numpy.transpose(pts)) + dv = numpy.array(dv).transpose((1, 2, 0)) + dmats = numpy.linalg.solve(numpy.transpose(v), dv) return cache.setdefault(key, dmats) def _tabulate_jet(self, degree, pts, order=0): @@ -508,8 +503,7 @@ def _make_factors(self, ref_pts): return [ref_pts[0], 1., 0., 0.] def _make_dfactors(self, ref_pts): - dx = ref_pts - ref_pts - dx[..., 0] = 1. + dx = ref_pts - ref_pts + self.A[:, 0][:, None] return [dx, 0.*dx, 0.*dx, 0.*dx] def _normalize(self, n, phi): @@ -585,12 +579,10 @@ def _make_factors(self, ref_pts): def _make_dfactors(self, ref_pts): y = ref_pts[1] - dx = ref_pts - ref_pts - dy = ref_pts - ref_pts - dx[..., 0] = 1. - dy[..., 1] = 1. - return [dx + dy, - 0.5 * y * dy, + dx = ref_pts - ref_pts + self.A[:, 0][:, None] + dy = ref_pts - ref_pts + self.A[:, 1][:, None] + return [dx + 0.5 * dy, + -0.5 * (1. - y) * dy, dy, 0 * dx] @@ -645,12 +637,9 @@ def _make_factors(self, ref_pts): def _make_dfactors(self, ref_pts): y = ref_pts[1] z = ref_pts[2] - dx = ref_pts - ref_pts - dy = ref_pts - ref_pts - dz = ref_pts - ref_pts - dx[..., 0] = 1. - dy[..., 1] = 1. - dz[..., 2] = 1. 
+ dx = ref_pts - ref_pts + self.A[:, 0][:, None] + dy = ref_pts - ref_pts + self.A[:, 1][:, None] + dz = ref_pts - ref_pts + self.A[:, 2][:, None] return [dx + 0.5 * dy + 0.5 * dz, 0.5 * (y + z) * (dy + dz), dy, From d2134730c6593eaffce1aee370b1e4e4e594e2e6 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Tue, 31 Oct 2023 22:51:02 +0000 Subject: [PATCH 24/76] remove Duffy, tidy up --- FIAT/expansions.py | 329 +++++++++++++-------------------------------- 1 file changed, 95 insertions(+), 234 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 4f9f0c8a2..82afad7f8 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -32,201 +32,10 @@ def jrc(a, b, n): return an, bn, cn -def _tabulate_dpts(tabulator, D, n, order, pts): - X = sympy.DeferredVector('x') - - def form_derivative(F): - '''Forms the derivative recursively, i.e., - F -> [F_x, F_y, F_z], - [F_x, F_y, F_z] -> [[F_xx, F_xy, F_xz], - [F_yx, F_yy, F_yz], - [F_zx, F_zy, F_zz]] - and so forth. - ''' - out = [] - try: - out = [sympy.diff(F, X[j]) for j in range(D)] - except (AttributeError, ValueError): - # Intercept errors like - # AttributeError: 'list' object has no attribute - # 'free_symbols' - for f in F: - out.append(form_derivative(f)) - return out - - def numpy_lambdify(X, F): - '''Unfortunately, SymPy's own lambdify() doesn't work well with - NumPy in that simple functions like - lambda x: 1.0, - when evaluated with NumPy arrays, return just "1.0" instead of - an array of 1s with the same shape as x. This function does that. - ''' - try: - lambda_x = [numpy_lambdify(X, f) for f in F] - except TypeError: # 'function' object is not iterable - # SymPy's lambdify also works on functions that return arrays. - # However, use it componentwise here so we can add 0*x to each - # component individually. This is necessary to maintain shapes - # if evaluated with NumPy arrays. - lmbd_tmp = sympy.lambdify(X, F) - lambda_x = lambda x: lmbd_tmp(x) + 0 * x[0] - return lambda_x - - def evaluate_lambda(lmbd, x): - '''Properly evaluate lambda expressions recursively for iterables. - ''' - try: - values = [evaluate_lambda(l, x) for l in lmbd] - except TypeError: # 'function' object is not iterable - values = lmbd(x) - return values - - # Tabulate symbolically - symbolic_tab = tabulator(n, X) - # Make sure that the entries of symbolic_tab are lists so we can - # append derivatives - symbolic_tab = [[phi] for phi in symbolic_tab] - # - data = (order + 1) * [None] - for r in range(order + 1): - shape = [len(symbolic_tab), len(pts)] + r * [D] - data[r] = numpy.empty(shape) - for i, phi in enumerate(symbolic_tab): - # Evaluate the function numerically using lambda expressions - deriv_lambda = numpy_lambdify(X, phi[r]) - data[r][i] = \ - numpy.array(evaluate_lambda(deriv_lambda, pts.T)).T - # Symbolically compute the next derivative. - # This actually happens once too many here; never mind for - # now. - phi.append(form_derivative(phi[-1])) - return data - - -def xi_triangle(eta): - """Maps from [-1,1]^2 to the (-1,1) reference triangle.""" - eta1, eta2 = eta - xi1 = 0.5 * (1.0 + eta1) * (1.0 - eta2) - 1.0 - xi2 = eta2 - return (xi1, xi2) - - -def xi_tetrahedron(eta): - """Maps from [-1,1]^3 to the -1/1 reference tetrahedron.""" - eta1, eta2, eta3 = eta - xi1 = 0.25 * (1. + eta1) * (1. - eta2) * (1. - eta3) - 1. - xi2 = 0.5 * (1. + eta2) * (1. - eta3) - 1. 
- xi3 = eta3 - return xi1, xi2, xi3 - - -def eta_square(xi): - """Maps from the (-1,1) reference triangle to [-1,1]^2.""" - xi1, xi2 = xi - with numpy.errstate(divide='ignore', invalid='ignore'): - eta1 = 2. * (1. + xi1) / (1. - xi2) - 1. - eta2 = xi2 - if eta1.dtype != object: - eta1[numpy.logical_not(numpy.isfinite(eta1))] = 1. - return eta1, eta2 - - -def eta_cube(xi): - """Maps from the (-1,1) reference tetrahedron to [-1,1]^3.""" - xi1, xi2, xi3 = xi - with numpy.errstate(divide='ignore', invalid='ignore'): - eta1 = 2. * (1. + xi1) / (-xi2 - xi3) - 1. - eta2 = 2. * (1. + xi2) / (1. - xi3) - 1. - eta3 = xi3 - if eta1.dtype != object: - eta1[numpy.logical_not(numpy.isfinite(eta1))] = 1. - if eta2.dtype != object: - eta2[numpy.logical_not(numpy.isfinite(eta2))] = 1. - return eta1, eta2, eta3 - - -def dubiner_1d(order, dim, x): - """Returns a tabulation of the orthonormal Dubiner 1D polymoials, defined as - c_i P_i^(0,0)(x) if dim == 0, - c_ij (1-x)^i P_j^(2i + dim, 0)(x) otherwise.""" - if dim == 0: - scale = numpy.sqrt(0.5 + numpy.arange(order + 1)) - results = jacobi.eval_jacobi_batch(0, 0, order, x[:, None]) - return numpy.multiply(scale[:, None], results, out=results) - sd = (order + 1) * (order + 2) // 2 - phi = numpy.zeros((sd, x.size), dtype=x.dtype) - x1 = (1. - x) * 0.5 - for i in range(order + 1): - n = order - i - alpha = 2 * i + dim - results = jacobi.eval_jacobi_batch(alpha, 0, n, x[:, None]) - if i > 0: - results *= x1 ** i - scale = numpy.sqrt(0.5*(alpha + 1) + numpy.arange(n + 1)) - numpy.multiply(scale[:, None], results, out=results) - indices = [morton_index2(i, j) for j in range(n + 1)] - phi[indices, :] = results - return phi - - -def dubiner_deriv_1d(order, dim, x): - """Returns a tabulation of the first derivatives of the orthonormal Dubiner - 1D polynomials.""" +def recurrence(dim, n, factors, phi, dfactors=None, dphi=None): if dim == 0: - scale = numpy.sqrt(0.5 + numpy.arange(order + 1)) - results = jacobi.eval_jacobi_deriv_batch(0, 0, order, x[:, None]) - return numpy.multiply(scale[:, None], results, out=results) - sd = (order + 1) * (order + 2) // 2 - dphi = numpy.zeros((sd, x.size), dtype=x.dtype) - x1 = (1. - x) * 0.5 - for i in range(order + 1): - n = order - i - alpha = 2 * i + dim - derivs = jacobi.eval_jacobi_deriv_batch(alpha, 0, n, x[:, None]) - if i > 0: - results = jacobi.eval_jacobi_batch(alpha, 0, n, x[:, None]) - derivs *= x1 - derivs += results * (-0.5 * i) - if i > 1: - derivs *= x1 ** (i - 1) - scale = numpy.sqrt(0.5*(alpha + 1) + numpy.arange(n + 1)) - numpy.multiply(scale[:, None], derivs, out=derivs) - indices = [morton_index2(i, j) for j in range(n + 1)] - dphi[indices, :] = derivs - return dphi - - -def duffy_chain_rule(A, eta, tabulations): - """Applies the chain rule associated with an affine transformation onto the - default simplex on (-1, 1), followed by the Duffy transformation onto [-1, 1]^d. - A: the Jacobian of the affine transformation - eta: the points in [-1, 1]^d - tabulations: On entry, the tabulations of the reference gradient with - respect to eta. On exit, the tabulations of the gradient in physical space. - """ - dim = len(eta) - dphi_dxi = [tabulations[alpha] for alpha in sorted(tabulations, reverse=True) if sum(alpha) == 1] - if len(dphi_dxi) < dim: return - eta1 = [(1. - x) * 0.5 for x in eta] - for i in range(dim): - for j in range(i): - Jij = -0.5 * (1. 
+ eta[j]) - for k in range(j + 1, dim): - if k != i: - Jij *= eta1[k] - dphi_dxi[i] -= dphi_dxi[j] * Jij - for j in range(i + 1, dim): - dphi_dxi[i] /= eta1[j] - j = 0 - for alpha in sorted(tabulations, reverse=True): - if sum(alpha) == 1: - tabulations[alpha] = sum(dphi_dxi[i] * A[i][j] for i in range(dim)) - j += 1 - - -def recurrence(dim, n, factors, phi, dfactors=None, dphi=None): - if dim == 1: + elif dim == 1: idx = lambda p: p elif dim == 2: idx = morton_index2 @@ -244,7 +53,7 @@ def recurrence(dim, n, factors, phi, dfactors=None, dphi=None): # p = 1 phi[idx(1)] = f1 - if dphi is not None: + if not skip_derivs: dphi[idx(1)] = df1 # general p by recurrence @@ -257,8 +66,8 @@ def recurrence(dim, n, factors, phi, dfactors=None, dphi=None): phi[inext] = a * f1 * phi[icur] - b * f2 * phi[iprev] if skip_derivs: continue - dphi[inext] = a * f1 * dphi[icur] - b * f2 * dphi[iprev] \ - + a * phi[icur] * df1 - b * phi[iprev] * df2 + dphi[inext] = (a * f1 * dphi[icur] - b * f2 * dphi[iprev] + + a * phi[icur] * df1 - b * phi[iprev] * df2) if dim < 2: return @@ -281,7 +90,7 @@ def recurrence(dim, n, factors, phi, dfactors=None, dphi=None): iprev = idx(p, q - 1) aq, bq, cq = jrc(2 * p + 1, 0, q) g = aq * f3 + (bq - aq) * f4 - h = cq * f5 + h = cq * f5 phi[inext] = g * phi[icur] - h * phi[iprev] if skip_derivs: continue @@ -322,6 +131,94 @@ def recurrence(dim, n, factors, phi, dfactors=None, dphi=None): dphi[inext] = (ar * z + br) * dphi[icur] + ar * phi[icur] * dz - cr * dphi[iprev] +def _tabulate_dpts(tabulator, D, n, order, pts): + X = sympy.DeferredVector('x') + + def form_derivative(F): + '''Forms the derivative recursively, i.e., + F -> [F_x, F_y, F_z], + [F_x, F_y, F_z] -> [[F_xx, F_xy, F_xz], + [F_yx, F_yy, F_yz], + [F_zx, F_zy, F_zz]] + and so forth. + ''' + out = [] + try: + out = [sympy.diff(F, X[j]) for j in range(D)] + except (AttributeError, ValueError): + # Intercept errors like + # AttributeError: 'list' object has no attribute + # 'free_symbols' + for f in F: + out.append(form_derivative(f)) + return out + + def numpy_lambdify(X, F): + '''Unfortunately, SymPy's own lambdify() doesn't work well with + NumPy in that simple functions like + lambda x: 1.0, + when evaluated with NumPy arrays, return just "1.0" instead of + an array of 1s with the same shape as x. This function does that. + ''' + try: + lambda_x = [numpy_lambdify(X, f) for f in F] + except TypeError: # 'function' object is not iterable + # SymPy's lambdify also works on functions that return arrays. + # However, use it componentwise here so we can add 0*x to each + # component individually. This is necessary to maintain shapes + # if evaluated with NumPy arrays. + lmbd_tmp = sympy.lambdify(X, F) + lambda_x = lambda x: lmbd_tmp(x) + 0 * x[0] + return lambda_x + + def evaluate_lambda(lmbd, x): + '''Properly evaluate lambda expressions recursively for iterables. 
+ ''' + try: + values = [evaluate_lambda(l, x) for l in lmbd] + except TypeError: # 'function' object is not iterable + values = lmbd(x) + return values + + # Tabulate symbolically + symbolic_tab = tabulator(n, X) + # Make sure that the entries of symbolic_tab are lists so we can + # append derivatives + symbolic_tab = [[phi] for phi in symbolic_tab] + # + data = (order + 1) * [None] + for r in range(order + 1): + shape = [len(symbolic_tab), len(pts)] + r * [D] + data[r] = numpy.empty(shape) + for i, phi in enumerate(symbolic_tab): + # Evaluate the function numerically using lambda expressions + deriv_lambda = numpy_lambdify(X, phi[r]) + data[r][i] = \ + numpy.array(evaluate_lambda(deriv_lambda, pts.T)).T + # Symbolically compute the next derivative. + # This actually happens once too many here; never mind for + # now. + phi.append(form_derivative(phi[-1])) + return data + + +def xi_triangle(eta): + """Maps from [-1,1]^2 to the (-1,1) reference triangle.""" + eta1, eta2 = eta + xi1 = 0.5 * (1.0 + eta1) * (1.0 - eta2) - 1.0 + xi2 = eta2 + return (xi1, xi2) + + +def xi_tetrahedron(eta): + """Maps from [-1,1]^3 to the -1/1 reference tetrahedron.""" + eta1, eta2, eta3 = eta + xi1 = 0.25 * (1. + eta1) * (1. - eta2) * (1. - eta3) - 1. + xi2 = 0.5 * (1. + eta2) * (1. - eta3) - 1. + xi3 = eta3 + return xi1, xi2, xi3 + + class ExpansionSet(object): point_set = RecursivePointSet(lambda n: GaussLegendreQuadratureLineRule(UFCInterval(), n + 1).get_points()) @@ -389,42 +286,6 @@ def _tabulate_derivatives(self, n, pts): self._normalize(n, dphi) return phi, dphi - def _tabulate_duffy(self, n, pts): - """Returns a dict of tabulations of phi_i(pts[j]) and each component of - the gradient d/dx_k phi_i(pts[j]). Here we employ the Duffy transform, - and thus this tabulation mode is only recommended for use with interior - points. 
- """ - from FIAT.polynomial_set import mis - dim = self.ref_el.get_spatial_dimension() - sd = self.get_num_members(n) - xi = numpy.transpose(numpy.dot(pts, self.A.T) + self.b) - eta = (lambda x: x, lambda x: x, eta_square, eta_cube)[dim](xi) - basis = [dubiner_1d(n, k, eta[k]) for k in range(dim)] - derivs = [dubiner_deriv_1d(n, k, eta[k]) for k in range(dim)] - tabulations = {} - alphas = mis(dim, 0) + mis(dim, 1) - for alpha in alphas: - V = [v if a == 0 else dv for a, v, dv in zip(alpha, basis, derivs)] - phi = V[0] - if dim >= 2: - phi1 = phi - phi = numpy.copy(V[1]) - for i in range(n + 1): - indices = [morton_index2(i, j) for j in range(n + 1 - i)] - phi[indices] *= phi1[i] - if dim >= 3: - phi2 = phi - phi = numpy.zeros((sd, V[0].shape[1]), dtype=V[0].dtype) - for i in range(n + 1): - for j in range(n + 1 - i): - Vij = phi2[morton_index2(i, j)] - for k in range(n + 1 - i - j): - phi[morton_index3(i, j, k)] = V[2][morton_index2(i + j, k)] * Vij - tabulations[alpha] = phi - duffy_chain_rule(self.A, eta, tabulations) - return tabulations - def make_dmats(self, degree): """Returns a numpy array with the expansion coefficients dmat[k, j, i] of the gradient of each member of the expansion set: @@ -650,7 +511,7 @@ def _normalize(self, n, phi): for p in range(n + 1): for q in range(n - p + 1): for r in range(n - p - q + 1): - phi[idx(p, q, r)] *= math.sqrt((p + 0.5) * (p + q + 1.0) * (p + q + r +1.5)) + phi[idx(p, q, r)] *= math.sqrt((p + 0.5) * (p + q + 1.0) * (p + q + r + 1.5)) def tabulate_derivatives(self, n, pts): order = 1 From 807e34033059f908611fca7f3ba4b23a6621f2dc Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Tue, 31 Oct 2023 22:58:19 +0000 Subject: [PATCH 25/76] cleanup --- FIAT/expansions.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 82afad7f8..055d63c7a 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -33,6 +33,7 @@ def jrc(a, b, n): def recurrence(dim, n, factors, phi, dfactors=None, dphi=None): + skip_derivs = dphi is None if dim == 0: return elif dim == 1: @@ -44,7 +45,6 @@ def recurrence(dim, n, factors, phi, dfactors=None, dphi=None): else: raise ValueError("Invalid number of spatial dimensions") - skip_derivs = dphi is None f1, f2, f3, f4 = factors f5 = f4 ** 2 if dfactors is not None: @@ -77,7 +77,7 @@ def recurrence(dim, n, factors, phi, dfactors=None, dphi=None): inext = idx(p, 1) g = (p + 1.5) * f3 - f4 phi[inext] = g * phi[icur] - if dphi is None: + if skip_derivs: continue dg = (p + 1.5) * df3 - df4 dphi[inext] = g * dphi[icur] + phi[icur] * dg @@ -112,7 +112,7 @@ def recurrence(dim, n, factors, phi, dfactors=None, dphi=None): b = 1.0 + p + q g = a * z + b phi[inext] = g * phi[icur] - if dphi is None: + if skip_derivs: continue dg = a * dz dphi[inext] = g * dphi[icur] + phi[icur] * dg From f1024b1050c90f49654ff32b5b9ee51302171fd4 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Wed, 1 Nov 2023 00:02:05 +0000 Subject: [PATCH 26/76] Unify across dimension of simplex --- FIAT/expansions.py | 116 ++++++++++++++++----------------------------- 1 file changed, 41 insertions(+), 75 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 055d63c7a..e524853d5 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -249,6 +249,13 @@ def __init__(self, ref_el): self.scale = numpy.sqrt(numpy.linalg.det(self.A)) self._dmats_cache = {} + def get_num_members(self, n): + dim = self.ref_el.get_spatial_dimension() + num_members = 1 + for k in range(1, dim+1): + num_members 
= (num_members * (n + k)) // k + return num_members + def _tabulate(self, n, pts): '''A version of tabulate() that also works for a single point. ''' @@ -286,6 +293,29 @@ def _tabulate_derivatives(self, n, pts): self._normalize(n, dphi) return phi, dphi + def tabulate(self, n, pts): + if len(pts) == 0: + return numpy.array([]) + else: + return numpy.array(self._tabulate(n, numpy.array(pts).T)) + + def tabulate_derivatives(self, n, pts): + order = 1 + D = self.ref_el.get_spatial_dimension() + data = _tabulate_dpts(self._tabulate, D, n, order, numpy.array(pts)) + # Put data in the required data structure, i.e., + # k-tuples which contain the value, and the k-1 derivatives + # (gradient, Hessian, ...) + m, n = data[0].shape + data2 = [[tuple([data[r][i][j] for r in range(order+1)]) + for j in range(n)] + for i in range(m)] + return data2 + + def tabulate_jet(self, n, pts, order=1): + D = self.ref_el.get_spatial_dimension() + return _tabulate_dpts(self._tabulate, D, n, order, numpy.array(pts)) + def make_dmats(self, degree): """Returns a numpy array with the expansion coefficients dmat[k, j, i] of the gradient of each member of the expansion set: @@ -331,9 +361,6 @@ def __init__(self, ref_el): raise ValueError("Must have a point") super(PointExpansionSet, self).__init__(ref_el) - def get_num_members(self, n): - return 1 - def tabulate(self, n, pts): """Returns a numpy array A[i,j] = phi_i(pts[j]) = 1.0.""" assert n == 0 @@ -357,14 +384,11 @@ def __init__(self, ref_el): raise Exception("Must have a line") super(LineExpansionSet, self).__init__(ref_el) - def get_num_members(self, n): - return n + 1 - def _make_factors(self, ref_pts): return [ref_pts[0], 1., 0., 0.] def _make_dfactors(self, ref_pts): - dx = ref_pts - ref_pts + self.A[:, 0][:, None] + dx = ref_pts - ref_pts + self.A[:, 0:1] return [dx, 0.*dx, 0.*dx, 0.*dx] def _normalize(self, n, phi): @@ -375,12 +399,8 @@ def tabulate(self, n, pts): """Returns a numpy array A[i,j] = phi_i(pts[j])""" if len(pts) > 0: ref_pts = numpy.array([self.mapping(pt) for pt in pts]) - psitilde_as = jacobi.eval_jacobi_batch(0, 0, n, ref_pts) - - results = numpy.zeros((n + 1, len(pts)), type(pts[0][0])) - for k in range(n + 1): - results[k, :] = psitilde_as[k, :] * math.sqrt(k + 0.5) - + results = jacobi.eval_jacobi_batch(0, 0, n, ref_pts) + self._normalize(n, results) return results else: return [] @@ -391,14 +411,11 @@ def tabulate_derivatives(self, n, pts): compatibility with the interfaces of the triangle and tetrahedron expansions.""" ref_pts = numpy.array([self.mapping(pt) for pt in pts]) - psitilde_as_derivs = jacobi.eval_jacobi_deriv_batch(0, 0, n, ref_pts) + results = jacobi.eval_jacobi_deriv_batch(0, 0, n, ref_pts) # Jacobi polynomials defined on [-1, 1], first derivatives need scaling - psitilde_as_derivs *= 2.0 / self.ref_el.volume() - - results = numpy.zeros((n + 1, len(pts)), "d") - for k in range(0, n + 1): - results[k, :] = psitilde_as_derivs[k, :] * numpy.sqrt(k + 0.5) + results *= 2.0 / self.ref_el.volume() + self._normalize(n, results) vals = self.tabulate(n, pts) deriv_vals = (results,) @@ -421,15 +438,6 @@ def __init__(self, ref_el): raise Exception("Must have a triangle") super(TriangleExpansionSet, self).__init__(ref_el) - def get_num_members(self, n): - return (n + 1) * (n + 2) // 2 - - def tabulate(self, n, pts): - if len(pts) == 0: - return numpy.array([]) - else: - return numpy.array(self._tabulate(n, numpy.array(pts).T)) - def _make_factors(self, ref_pts): x = ref_pts[0] y = ref_pts[1] @@ -440,8 +448,8 @@ def _make_factors(self, 
ref_pts): def _make_dfactors(self, ref_pts): y = ref_pts[1] - dx = ref_pts - ref_pts + self.A[:, 0][:, None] - dy = ref_pts - ref_pts + self.A[:, 1][:, None] + dx = ref_pts - ref_pts + self.A[:, 0:1] + dy = ref_pts - ref_pts + self.A[:, 1:2] return [dx + 0.5 * dy, -0.5 * (1. - y) * dy, dy, @@ -453,22 +461,6 @@ def _normalize(self, n, phi): for q in range(n - p + 1): phi[idx(p, q)] *= math.sqrt((p + 0.5) * (p + q + 1.0)) - def tabulate_derivatives(self, n, pts): - order = 1 - data = _tabulate_dpts(self._tabulate, 2, n, order, numpy.array(pts)) - # Put data in the required data structure, i.e., - # k-tuples which contain the value, and the k-1 derivatives - # (gradient, Hessian, ...) - m = data[0].shape[0] - n = data[0].shape[1] - data2 = [[tuple([data[r][i][j] for r in range(order+1)]) - for j in range(n)] - for i in range(m)] - return data2 - - def tabulate_jet(self, n, pts, order=1): - return _tabulate_dpts(self._tabulate, 2, n, order, numpy.array(pts)) - class TetrahedronExpansionSet(ExpansionSet): """Collapsed orthonormal polynomial expanion on a tetrahedron.""" @@ -477,15 +469,6 @@ def __init__(self, ref_el): raise Exception("Must be a tetrahedron") super(TetrahedronExpansionSet, self).__init__(ref_el) - def get_num_members(self, n): - return (n + 1) * (n + 2) * (n + 3) // 6 - - def tabulate(self, n, pts): - if len(pts) == 0: - return numpy.array([]) - else: - return numpy.array(self._tabulate(n, numpy.array(pts).T)) - def _make_factors(self, ref_pts): x = ref_pts[0] y = ref_pts[1] @@ -498,9 +481,9 @@ def _make_factors(self, ref_pts): def _make_dfactors(self, ref_pts): y = ref_pts[1] z = ref_pts[2] - dx = ref_pts - ref_pts + self.A[:, 0][:, None] - dy = ref_pts - ref_pts + self.A[:, 1][:, None] - dz = ref_pts - ref_pts + self.A[:, 2][:, None] + dx = ref_pts - ref_pts + self.A[:, 0:1] + dy = ref_pts - ref_pts + self.A[:, 1:2] + dz = ref_pts - ref_pts + self.A[:, 2:3] return [dx + 0.5 * dy + 0.5 * dz, 0.5 * (y + z) * (dy + dz), dy, @@ -513,23 +496,6 @@ def _normalize(self, n, phi): for r in range(n - p - q + 1): phi[idx(p, q, r)] *= math.sqrt((p + 0.5) * (p + q + 1.0) * (p + q + r + 1.5)) - def tabulate_derivatives(self, n, pts): - order = 1 - D = 3 - data = _tabulate_dpts(self._tabulate, D, n, order, numpy.array(pts)) - # Put data in the required data structure, i.e., - # k-tuples which contain the value, and the k-1 derivatives - # (gradient, Hessian, ...) - m = data[0].shape[0] - n = data[0].shape[1] - data2 = [[tuple([data[r][i][j] for r in range(order + 1)]) - for j in range(n)] - for i in range(m)] - return data2 - - def tabulate_jet(self, n, pts, order=1): - return _tabulate_dpts(self._tabulate, 3, n, order, numpy.array(pts)) - def polynomial_dimension(ref_el, degree): """Returns the dimension of the space of polynomials of degree no From b452e274f80243cbac3e3fd55debedfe79167eac Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Wed, 1 Nov 2023 00:13:39 +0000 Subject: [PATCH 27/76] cleanup --- FIAT/expansions.py | 40 ++++++++++++++++++++++------------------ 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index e524853d5..0831fb6b1 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -385,11 +385,11 @@ def __init__(self, ref_el): super(LineExpansionSet, self).__init__(ref_el) def _make_factors(self, ref_pts): - return [ref_pts[0], 1., 0., 0.] + return ref_pts[0], 1., 0., 0. 
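
Each cell shape supplies one of these factor tuples to the recurrence, and the intended outcome is a basis that is orthonormal on the default simplex. A rough check of that property (a sketch assuming a FIAT build containing these changes; get_expansion_set is FIAT's existing factory and is not introduced here):

    import numpy
    from FIAT import quadrature, reference_element
    from FIAT.expansions import get_expansion_set

    ref_el = reference_element.default_simplex(2)   # the (-1, 1) triangle
    degree = 4
    rule = quadrature.make_quadrature(ref_el, degree + 1)
    phi = get_expansion_set(ref_el).tabulate(degree, rule.get_points())
    # Gram matrix of the tabulated basis under the quadrature inner product
    gram = numpy.dot(phi * rule.get_weights(), phi.T)
    assert numpy.allclose(gram, numpy.eye(phi.shape[0]))
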
def _make_dfactors(self, ref_pts): dx = ref_pts - ref_pts + self.A[:, 0:1] - return [dx, 0.*dx, 0.*dx, 0.*dx] + return dx, 0.*dx, 0.*dx, 0.*dx def _normalize(self, n, phi): for p in range(n + 1): @@ -441,19 +441,21 @@ def __init__(self, ref_el): def _make_factors(self, ref_pts): x = ref_pts[0] y = ref_pts[1] - return [0.5 * (1. + 2. * x + y), - (0.5 * (1. - y)) ** 2, - 1. + y, - 1.] + factor1 = 0.5 * (1. + 2. * x + y) + factor2 = (0.5 * (1. - y)) ** 2 + factor3 = 1. + y + factor4 = 1. + return factor1, factor2, factor3, factor4 def _make_dfactors(self, ref_pts): y = ref_pts[1] dx = ref_pts - ref_pts + self.A[:, 0:1] dy = ref_pts - ref_pts + self.A[:, 1:2] - return [dx + 0.5 * dy, - -0.5 * (1. - y) * dy, - dy, - 0 * dx] + dfactor1 = dx + 0.5 * dy + dfactor2 = -0.5 * (1. - y) * dy + dfactor3 = dy + dfactor4 = 0. * dx + return dfactor1, dfactor2, dfactor3, dfactor4 def _normalize(self, n, phi): idx = morton_index2 @@ -473,10 +475,11 @@ def _make_factors(self, ref_pts): x = ref_pts[0] y = ref_pts[1] z = ref_pts[2] - return [0.5 * (2. + 2. * x + y + z), - (0.5 * (y + z))**2, - 1. + y, - 0.5 * (1. - z)] + factor1 = 0.5 * (2. + 2. * x + y + z) + factor2 = (0.5 * (y + z))**2 + factor3 = 1. + y + factor4 = 0.5 * (1. - z) + return factor1, factor2, factor3, factor4 def _make_dfactors(self, ref_pts): y = ref_pts[1] @@ -484,10 +487,11 @@ def _make_dfactors(self, ref_pts): dx = ref_pts - ref_pts + self.A[:, 0:1] dy = ref_pts - ref_pts + self.A[:, 1:2] dz = ref_pts - ref_pts + self.A[:, 2:3] - return [dx + 0.5 * dy + 0.5 * dz, - 0.5 * (y + z) * (dy + dz), - dy, - -0.5 * dz] + dfactor1 = 0.5 * (2. * dx + dy + dz) + dfactor2 = 0.5 * (y + z) * (dy + dz) + dfactor3 = dy + dfactor4 = -0.5 * dz + return dfactor1, dfactor2, dfactor3, dfactor4 def _normalize(self, n, phi): idx = morton_index3 From 404e30db3eda301213a124a68edd99ebefc73f8c Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Wed, 1 Nov 2023 10:17:55 +0000 Subject: [PATCH 28/76] tidy up recurrence --- FIAT/expansions.py | 50 +++++++++++++++++++--------------------------- 1 file changed, 21 insertions(+), 29 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 0831fb6b1..7d17e8029 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -52,15 +52,15 @@ def recurrence(dim, n, factors, phi, dfactors=None, dphi=None): df5 = 2 * f4 * df4 # p = 1 - phi[idx(1)] = f1 + icur = idx(0) + inext = idx(1) + phi[inext] = f1 if not skip_derivs: - dphi[idx(1)] = df1 - + dphi[inext] = df1 # general p by recurrence for p in range(1, n): - icur = idx(p) + iprev, icur = icur, inext inext = idx(p + 1) - iprev = idx(p - 1) a = (2. * p + 1.) / (1. + p) b = p / (1. 
+ p) phi[inext] = a * f1 * phi[icur] - b * f2 * phi[iprev] @@ -71,23 +71,19 @@ def recurrence(dim, n, factors, phi, dfactors=None, dphi=None): if dim < 2: return - # q = 1 for p in range(n): + # q = 1 icur = idx(p, 0) inext = idx(p, 1) g = (p + 1.5) * f3 - f4 phi[inext] = g * phi[icur] - if skip_derivs: - continue - dg = (p + 1.5) * df3 - df4 - dphi[inext] = g * dphi[icur] + phi[icur] * dg - - # general q by recurrence - for p in range(n - 1): + if not skip_derivs: + dg = (p + 1.5) * df3 - df4 + dphi[inext] = g * dphi[icur] + phi[icur] * dg + # general q by recurrence for q in range(1, n - p): - icur = idx(p, q) + iprev, icur = icur, inext inext = idx(p, q + 1) - iprev = idx(p, q - 1) aq, bq, cq = jrc(2 * p + 1, 0, q) g = aq * f3 + (bq - aq) * f4 h = cq * f5 @@ -103,32 +99,28 @@ def recurrence(dim, n, factors, phi, dfactors=None, dphi=None): z = 1 - 2 * f4 if dfactors: dz = -2 * df4 - # r = 1 + for p in range(n): - for q in range(n - p): + for q in range(0, n - p): + # r = 1 icur = idx(p, q, 0) inext = idx(p, q, 1) a = 2.0 + p + q b = 1.0 + p + q g = a * z + b phi[inext] = g * phi[icur] - if skip_derivs: - continue - dg = a * dz - dphi[inext] = g * dphi[icur] + phi[icur] * dg - - # general r by recurrence - for p in range(n - 1): - for q in range(0, n - p - 1): + if not skip_derivs: + dphi[inext] = g * dphi[icur] + a * phi[icur] * dz + # general r by recurrence for r in range(1, n - p - q): - icur = idx(p, q, r) + iprev, icur = icur, inext inext = idx(p, q, r + 1) - iprev = idx(p, q, r - 1) ar, br, cr = jrc(2 * p + 2 * q + 2, 0, r) - phi[inext] = (ar * z + br) * phi[icur] - cr * phi[iprev] + g = ar * z + br + phi[inext] = g * phi[icur] - cr * phi[iprev] if skip_derivs: continue - dphi[inext] = (ar * z + br) * dphi[icur] + ar * phi[icur] * dz - cr * dphi[iprev] + dphi[inext] = g * dphi[icur] + ar * phi[icur] * dz - cr * dphi[iprev] def _tabulate_dpts(tabulator, D, n, order, pts): From f35b8fcf01a735f051c5e1a4965ec214da259711 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Wed, 1 Nov 2023 14:00:41 +0000 Subject: [PATCH 29/76] style --- FIAT/expansions.py | 45 +++++++++++++++++++++++------------------- FIAT/polynomial_set.py | 6 +++--- 2 files changed, 28 insertions(+), 23 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 7d17e8029..30e71a52e 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -17,8 +17,8 @@ from FIAT.recursive_points import RecursivePointSet -def morton_index2(i, j=0): - return (i + j) * (i + j + 1) // 2 + j +def morton_index2(p, q=0): + return (p + q) * (p + q + 1) // 2 + q def morton_index3(p, q=0, r=0): @@ -96,12 +96,12 @@ def recurrence(dim, n, factors, phi, dfactors=None, dphi=None): if dim < 3: return - z = 1 - 2 * f4 + z = 1. - 2. * f4 if dfactors: - dz = -2 * df4 + dz = -2. * df4 for p in range(n): - for q in range(0, n - p): + for q in range(n - p): # r = 1 icur = idx(p, q, 0) inext = idx(p, q, 1) @@ -248,36 +248,41 @@ def get_num_members(self, n): num_members = (num_members * (n + k)) // k return num_members + def _mapping(self, pts): + if isinstance(pts, numpy.ndarray): + return numpy.dot(self.A, pts) + self.b[:, None] + else: + m1, m2 = self.A.shape + return [sum((self.A[i][j] * pts[j] for j in range(m2)), self.b[i]) + for i in range(m1)] + def _tabulate(self, n, pts): - '''A version of tabulate() that also works for a single point. - ''' + """A version of tabulate() that also works for a single point. 
+ """ dim = self.ref_el.get_spatial_dimension() results = [None] * self.get_num_members(n) results[0] = sum((pts[i] - pts[i] for i in range(dim)), 1.) if n == 0: return results - m1, m2 = self.A.shape - ref_pts = [sum((self.A[i][j] * pts[j] for j in range(m2)), self.b[i]) - for i in range(m1)] + + ref_pts = self._mapping(pts) recurrence(dim, n, self._make_factors(ref_pts), results) self._normalize(n, results) return results def _tabulate_derivatives(self, n, pts): - '''A version of tabulate_derivatives() that also works for a single point. - ''' + """A version of tabulate_derivatives() that also works for a single point. + """ dim = self.ref_el.get_spatial_dimension() - phi = [None] * self.get_num_members(n) - dphi = [None] * self.get_num_members(n) + num_members = self.get_num_members(n) + phi = [None] * num_members + dphi = [None] * num_members phi[0] = sum((pts[i] - pts[i] for i in range(dim)), 1.) dphi[0] = pts - pts if n == 0: return phi, dphi - m1, m2 = self.A.shape - ref_pts = [sum((self.A[i][j] * pts[j] for j in range(m2)), self.b[i]) - for i in range(m1)] - ref_pts = numpy.array(ref_pts) + ref_pts = self._mapping(pts) factors = self._make_factors(ref_pts) dfactors = self._make_dfactors(ref_pts) recurrence(dim, n, factors, phi, dfactors=dfactors, dphi=dphi) @@ -308,7 +313,7 @@ def tabulate_jet(self, n, pts, order=1): D = self.ref_el.get_spatial_dimension() return _tabulate_dpts(self._tabulate, D, n, order, numpy.array(pts)) - def make_dmats(self, degree): + def get_dmats(self, degree): """Returns a numpy array with the expansion coefficients dmat[k, j, i] of the gradient of each member of the expansion set: d/dx_k phi_j = sum_i dmat[k, j, i] phi_i. @@ -332,7 +337,7 @@ def _tabulate_jet(self, degree, pts, order=0): from FIAT.polynomial_set import mis result = {} base_vals = self.tabulate(degree, pts) - dmats = self.make_dmats(degree) if order > 0 else [] + dmats = self.get_dmats(degree) if order > 0 else [] for i in range(order + 1): alphas = mis(self.ref_el.get_spatial_dimension(), i) for alpha in alphas: diff --git a/FIAT/polynomial_set.py b/FIAT/polynomial_set.py index cfde1a84d..381d360e6 100644 --- a/FIAT/polynomial_set.py +++ b/FIAT/polynomial_set.py @@ -25,9 +25,9 @@ def mis(m, n): if m == 1: return [(n,)] elif n == 0: - return [tuple([0] * m)] + return [(0,) * m] else: - return [tuple([n - i] + list(foo)) + return [(n - i,) + foo for i in range(n + 1) for foo in mis(m - 1, i)] @@ -91,7 +91,7 @@ def get_embedded_degree(self): def get_dmats(self): if len(self.dmats) == 0: - self.dmats = self.expansion_set.make_dmats(self.degree) + self.dmats = self.expansion_set.get_dmats(self.degree) return self.dmats def get_reference_element(self): From 19d95361038597d714dcbf1805d1c5173083494d Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Wed, 1 Nov 2023 18:34:17 +0000 Subject: [PATCH 30/76] refactoring --- FIAT/barycentric_interpolation.py | 4 ++-- FIAT/expansions.py | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/FIAT/barycentric_interpolation.py b/FIAT/barycentric_interpolation.py index 1a80bd345..b2dccf39d 100644 --- a/FIAT/barycentric_interpolation.py +++ b/FIAT/barycentric_interpolation.py @@ -36,8 +36,8 @@ def __init__(self, ref_el, pts): def get_num_members(self, n): return len(self.nodes) - def make_dmats(self, degree): - return [numpy.transpose(self.dmat)] + def get_dmats(self, degree): + return [self.dmat.T] def tabulate(self, n, pts): assert n == len(self.nodes)-1 diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 30e71a52e..f7c02fae7 100644 
--- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -46,10 +46,8 @@ def recurrence(dim, n, factors, phi, dfactors=None, dphi=None): raise ValueError("Invalid number of spatial dimensions") f1, f2, f3, f4 = factors - f5 = f4 ** 2 if dfactors is not None: df1, df2, df3, df4 = dfactors - df5 = 2 * f4 * df4 # p = 1 icur = idx(0) @@ -71,6 +69,10 @@ def recurrence(dim, n, factors, phi, dfactors=None, dphi=None): if dim < 2: return + f5 = f4 ** 2 + if dfactors is not None: + df5 = 2 * f4 * df4 + for p in range(n): # q = 1 icur = idx(p, 0) From a3de199dfda291fd72f30e84dfbffd779b71c3ad Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Fri, 3 Nov 2023 00:51:14 +0000 Subject: [PATCH 31/76] GLL interpolation test --- FIAT/expansions.py | 12 +++--- FIAT/gauss_legendre.py | 19 ++------- FIAT/gauss_lobatto_legendre.py | 50 ++---------------------- FIAT/lagrange.py | 4 +- FIAT/polynomial_set.py | 2 + FIAT/reference_element.py | 37 ++++++++++++------ test/unit/test_gauss_lobatto_legendre.py | 43 ++++++++++++++++++-- 7 files changed, 80 insertions(+), 87 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index f7c02fae7..1199f7b31 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -387,7 +387,7 @@ def _make_factors(self, ref_pts): return ref_pts[0], 1., 0., 0. def _make_dfactors(self, ref_pts): - dx = ref_pts - ref_pts + self.A[:, 0:1] + dx = ref_pts - ref_pts + self.A[0][:, None] return dx, 0.*dx, 0.*dx, 0.*dx def _normalize(self, n, phi): @@ -448,8 +448,8 @@ def _make_factors(self, ref_pts): def _make_dfactors(self, ref_pts): y = ref_pts[1] - dx = ref_pts - ref_pts + self.A[:, 0:1] - dy = ref_pts - ref_pts + self.A[:, 1:2] + dx = ref_pts - ref_pts + self.A[0][:, None] + dy = ref_pts - ref_pts + self.A[1][:, None] dfactor1 = dx + 0.5 * dy dfactor2 = -0.5 * (1. - y) * dy dfactor3 = dy @@ -483,9 +483,9 @@ def _make_factors(self, ref_pts): def _make_dfactors(self, ref_pts): y = ref_pts[1] z = ref_pts[2] - dx = ref_pts - ref_pts + self.A[:, 0:1] - dy = ref_pts - ref_pts + self.A[:, 1:2] - dz = ref_pts - ref_pts + self.A[:, 2:3] + dx = ref_pts - ref_pts + self.A[0][:, None] + dy = ref_pts - ref_pts + self.A[1][:, None] + dz = ref_pts - ref_pts + self.A[2][:, None] dfactor1 = 0.5 * (2. * dx + dy + dz) dfactor2 = 0.5 * (y + z) * (dy + dz) dfactor3 = dy diff --git a/FIAT/gauss_legendre.py b/FIAT/gauss_legendre.py index 7fe845034..1582c5caf 100644 --- a/FIAT/gauss_legendre.py +++ b/FIAT/gauss_legendre.py @@ -8,27 +8,16 @@ # # Modified by Pablo D. 
Brubeck (brubeck@protonmail.com), 2021 -from FIAT import (finite_element, polynomial_set, dual_set, functional, - quadrature, recursive_points) -from FIAT.reference_element import POINT, LINE, TRIANGLE, TETRAHEDRON, UFCInterval +from FIAT import finite_element, polynomial_set, dual_set, functional +from FIAT.reference_element import POINT, LINE, TRIANGLE, TETRAHEDRON from FIAT.orientation_utils import make_entity_permutations_simplex from FIAT.barycentric_interpolation import LagrangePolynomialSet - - -class GaussLegendrePointSet(recursive_points.RecursivePointSet): - """Recursive point set on simplices based on the Gauss-Legendre points on - the interval""" - def __init__(self): - ref_el = UFCInterval() - lr = quadrature.GaussLegendreQuadratureLineRule - f = lambda n: lr(ref_el, n + 1).get_points() - super(GaussLegendrePointSet, self).__init__(f) +from FIAT.reference_element import make_lattice class GaussLegendreDualSet(dual_set.DualSet): """The dual basis for discontinuous elements with nodes at the (recursive) Gauss-Legendre points.""" - point_set = GaussLegendrePointSet() def __init__(self, ref_el, degree): entity_ids = {} @@ -43,7 +32,7 @@ def __init__(self, ref_el, degree): entity_permutations[dim][entity] = perms # make nodes by getting points - pts = self.point_set.recursive_points(ref_el.get_vertices(), degree) + pts = make_lattice(ref_el.get_vertices(), degree, family="gl") nodes = [functional.PointEvaluation(ref_el, x) for x in pts] entity_ids[dim][0] = list(range(len(nodes))) super(GaussLegendreDualSet, self).__init__(nodes, ref_el, entity_ids, entity_permutations) diff --git a/FIAT/gauss_lobatto_legendre.py b/FIAT/gauss_lobatto_legendre.py index b7ccf7c30..51f9bf4db 100644 --- a/FIAT/gauss_lobatto_legendre.py +++ b/FIAT/gauss_lobatto_legendre.py @@ -8,61 +8,17 @@ # # Modified by Pablo D. 
Brubeck (brubeck@protonmail.com), 2021 -from FIAT import (finite_element, polynomial_set, dual_set, functional, - quadrature, recursive_points) -from FIAT.reference_element import LINE, TRIANGLE, TETRAHEDRON, UFCInterval -from FIAT.orientation_utils import make_entity_permutations_simplex +from FIAT import finite_element, polynomial_set, lagrange +from FIAT.reference_element import LINE, TRIANGLE, TETRAHEDRON from FIAT.barycentric_interpolation import LagrangePolynomialSet -class GaussLobattoLegendrePointSet(recursive_points.RecursivePointSet): - """Recursive point set on simplices based on the Gauss-Lobatto points on - the interval""" - def __init__(self): - ref_el = UFCInterval() - lr = quadrature.GaussLobattoLegendreQuadratureLineRule - f = lambda n: lr(ref_el, n + 1).get_points() if n else None - super(GaussLobattoLegendrePointSet, self).__init__(f) - - -class GaussLobattoLegendreDualSet(dual_set.DualSet): - """The dual basis for continuous elements with nodes at the - (recursive) Gauss-Lobatto points.""" - point_set = GaussLobattoLegendrePointSet() - - def __init__(self, ref_el, degree): - entity_ids = {} - nodes = [] - entity_permutations = {} - - # make nodes by getting points - # need to do this dimension-by-dimension, facet-by-facet - top = ref_el.get_topology() - - cur = 0 - for dim in sorted(top): - entity_ids[dim] = {} - entity_permutations[dim] = {} - perms = {0: [0]} if dim == 0 else make_entity_permutations_simplex(dim, degree - dim) - for entity in sorted(top[dim]): - pts_cur = self.point_set.make_points(ref_el, dim, entity, degree) - nodes_cur = [functional.PointEvaluation(ref_el, x) - for x in pts_cur] - nnodes_cur = len(nodes_cur) - nodes += nodes_cur - entity_ids[dim][entity] = list(range(cur, cur + nnodes_cur)) - cur += nnodes_cur - entity_permutations[dim][entity] = perms - - super(GaussLobattoLegendreDualSet, self).__init__(nodes, ref_el, entity_ids, entity_permutations) - - class GaussLobattoLegendre(finite_element.CiarletElement): """Simplicial continuous element with nodes at the (recursive) Gauss-Lobatto points.""" def __init__(self, ref_el, degree): if ref_el.shape not in {LINE, TRIANGLE, TETRAHEDRON}: raise ValueError("Gauss-Lobatto-Legendre elements are only defined on simplices.") - dual = GaussLobattoLegendreDualSet(ref_el, degree) + dual = lagrange.LagrangeDualSet(ref_el, degree, family="lgl") if ref_el.shape == LINE: points = [] for node in dual.nodes: diff --git a/FIAT/lagrange.py b/FIAT/lagrange.py index 7852bd2bb..f87ab28f1 100644 --- a/FIAT/lagrange.py +++ b/FIAT/lagrange.py @@ -14,7 +14,7 @@ class LagrangeDualSet(dual_set.DualSet): simplices of any dimension. Nodes are point evaluation at equispaced points.""" - def __init__(self, ref_el, degree): + def __init__(self, ref_el, degree, family="equi"): entity_ids = {} nodes = [] entity_permutations = {} @@ -29,7 +29,7 @@ def __init__(self, ref_el, degree): entity_permutations[dim] = {} perms = {0: [0]} if dim == 0 else make_entity_permutations_simplex(dim, degree - dim) for entity in sorted(top[dim]): - pts_cur = ref_el.make_points(dim, entity, degree) + pts_cur = ref_el.make_points(dim, entity, degree, family=family) nodes_cur = [functional.PointEvaluation(ref_el, x) for x in pts_cur] nnodes_cur = len(nodes_cur) diff --git a/FIAT/polynomial_set.py b/FIAT/polynomial_set.py index 381d360e6..8ce1204d0 100644 --- a/FIAT/polynomial_set.py +++ b/FIAT/polynomial_set.py @@ -115,6 +115,7 @@ class ONPolynomialSet(PolynomialSet): for vector- and tensor-valued sets as well. 
""" + def __init__(self, ref_el, degree, shape=tuple()): if shape == tuple(): @@ -143,6 +144,7 @@ def __init__(self, ref_el, degree, shape=tuple()): cur_idx = tuple([cur_bf] + list(idx) + [exp_bf]) coeffs[cur_idx] = 1.0 cur_bf += 1 + PolynomialSet.__init__(self, ref_el, degree, embedded_degree, expansion_set, coeffs) diff --git a/FIAT/reference_element.py b/FIAT/reference_element.py index 5b2234cf8..a06e12f57 100644 --- a/FIAT/reference_element.py +++ b/FIAT/reference_element.py @@ -24,6 +24,7 @@ from collections import defaultdict import operator from math import factorial +from recursivenodes.nodes import _recursive, _decode_family from FIAT.orientation_utils import make_cell_orientation_reflection_map_simplex, make_cell_orientation_reflection_map_tensorproduct @@ -39,6 +40,20 @@ TENSORPRODUCT = 99 +def multiindex_equal(d, isum, imin=0): + """A generator for d-tuple multi-indices whose sum is isum and minimum is imin. + """ + if d <= 0: + return + imax = isum - (d - 1) * imin + if imax < imin: + return + for i in range(imin, imax): + for a in multiindex_equal(d - 1, isum - i, imin=imin): + yield a + (i,) + yield (imin,) * (d - 1) + (imax,) + + def lattice_iter(start, finish, depth): """Generator iterating over the depth-dimensional lattice of integers between start and (finish-1). This works on simplices in @@ -54,7 +69,7 @@ def lattice_iter(start, finish, depth): yield jj + [ii] -def make_lattice(verts, n, interior=0): +def make_lattice(verts, n, interior=0, family="equi"): """Constructs a lattice of points on the simplex defined by verts. For example, the 1:st order lattice will be just the vertices. The optional argument interior specifies how many points from @@ -62,15 +77,11 @@ def make_lattice(verts, n, interior=0): and interior = 0, this function will return the vertices and midpoint, but with interior = 1, it will only return the midpoint.""" - - vs = numpy.array(verts) - hs = (vs - vs[0])[1:, :] / n - - m = hs.shape[0] - result = [tuple(vs[0] + numpy.array(indices).dot(hs)) - for indices in lattice_iter(interior, n + 1 - interior, m)] - - return result + family = _decode_family(family) + D = len(verts) + X = numpy.array(verts) + get_point = lambda alpha: tuple(numpy.dot(_recursive(D - 1, n, alpha, family), X)) + return list(map(get_point, multiindex_equal(D, n, interior))) def linalg_subspace_intersection(A, B): @@ -393,7 +404,7 @@ def compute_face_edge_tangents(self, dim, entity_id): edge_ts.append(vert_coords[dest] - vert_coords[source]) return edge_ts - def make_points(self, dim, entity_id, order): + def make_points(self, dim, entity_id, order, family="equi"): """Constructs a lattice of points on the entity_id:th facet of dimension dim. 
Order indicates how many points to include in each direction.""" @@ -403,9 +414,9 @@ def make_points(self, dim, entity_id, order): entity_verts = \ self.get_vertices_of_subcomplex( self.get_topology()[dim][entity_id]) - return make_lattice(entity_verts, order, 1) + return make_lattice(entity_verts, order, 1, family=family) elif dim == self.get_spatial_dimension(): - return make_lattice(self.get_vertices(), order, 1) + return make_lattice(self.get_vertices(), order, 1, family=family) else: raise ValueError("illegal dimension") diff --git a/test/unit/test_gauss_lobatto_legendre.py b/test/unit/test_gauss_lobatto_legendre.py index 427d92b28..6790c2a0e 100644 --- a/test/unit/test_gauss_lobatto_legendre.py +++ b/test/unit/test_gauss_lobatto_legendre.py @@ -26,12 +26,12 @@ def symmetric_simplex(dim): from FIAT import ufc_simplex s = ufc_simplex(dim) - r = lambda x: x ** 0.5 + h = 0.5 * (3.**0.5) if dim == 2: - s.vertices = [(0.0, 0.0), (-1.0, -r(3.0)), (1.0, -r(3.0))] + s.vertices = [(0., 1.), (-h, -0.5), (h, -0.5)] elif dim == 3: - s.vertices = [(r(3.0)/3, 0.0, 0.0), (-r(3.0)/6, 0.5, 0.0), - (-r(3.0)/6, -0.5, 0.0), (0.0, 0.0, r(6.0)/3)] + s.vertices = [(-h, -h, -h), (h, -h, -h), + (-h, h, -h), (-h, -h, h)] return s @@ -83,6 +83,41 @@ def test_symmetry(dim, degree): # TODO add rotational symmetry tests on each facet +@pytest.mark.parametrize("dim, degree", [(1, 64), (2, 64), (3, 16)]) +def test_interpolation(dim, degree): + from FIAT import GaussLobattoLegendre, quadrature, reference_element + + alphas = [tuple(row) for row in np.eye(dim, dtype=int)] + a = np.pi + f = lambda x: np.cos(a * sum(x)) + df = lambda x: -a * np.sin(a * sum(x)) + + s = symmetric_simplex(dim) + # s = reference_element.default_simplex(dim) + rule = quadrature.make_quadrature(s, degree + 1) + points = rule.get_points() + weights = rule.get_weights() + + f_at_pts = {} + f_at_pts[(0,)*dim] = np.array(list(map(f, points))) + for alpha in alphas: + f_at_pts[alpha] = np.array(list(map(df, points))) + + print() + k = 1 + while k <= degree: + fe = GaussLobattoLegendre(s, k) + tab = fe.tabulate(1, points) + coefficients = np.array([v(f) for v in fe.dual_basis()]) + + alpha = (0,) * dim + errorL2 = np.sqrt(np.dot(weights, (f_at_pts[alpha] - np.dot(coefficients, tab[alpha])) ** 2)) + err2 = sum((f_at_pts[alpha] - np.dot(coefficients, tab[alpha])) ** 2 for alpha in tab) + errorH1 = np.sqrt(np.dot(weights, err2)) + print("dim = %d, degree = %2d, L2-error = %.4E, H1-error = %.4E" % (dim, k, errorL2, errorH1)) + k *= 2 + + if __name__ == '__main__': import os pytest.main(os.path.abspath(__file__)) From aec9a08e97c174025bc50c966ad6b6ca298c79af Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Fri, 3 Nov 2023 09:39:52 +0000 Subject: [PATCH 32/76] Docstrings --- FIAT/expansions.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 1199f7b31..88f5ab120 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -26,6 +26,7 @@ def morton_index3(p, q=0, r=0): def jrc(a, b, n): + """Jacobi recurrence coefficients""" an = (2*n+1+a+b)*(2*n+2+a+b) / (2*(n+1)*(n+1+a+b)) bn = (a*a-b*b) * (2*n+1+a+b) / (2*(n+1)*(2*n+a+b)*(n+1+a+b)) cn = (n+a)*(n+b)*(2*n+2+a+b) / ((n+1)*(n+1+a+b)*(2*n+a+b)) @@ -33,6 +34,7 @@ def jrc(a, b, n): def recurrence(dim, n, factors, phi, dfactors=None, dphi=None): + """Dubiner recurrence from (Kirby 2010)""" skip_derivs = dphi is None if dim == 0: return From ea382db65f4e3fa9699a8e06be3edfcc2d101153 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Fri, 3 Nov 2023 16:55:51 +0000 
Subject: [PATCH 33/76] cleanup --- FIAT/barycentric_interpolation.py | 4 + FIAT/expansions.py | 154 ++++++++++------------- FIAT/quadrature.py | 68 ++-------- test/unit/test_gauss_legendre.py | 56 +++++++-- test/unit/test_gauss_lobatto_legendre.py | 45 ++++--- 5 files changed, 151 insertions(+), 176 deletions(-) diff --git a/FIAT/barycentric_interpolation.py b/FIAT/barycentric_interpolation.py index b2dccf39d..48f5ae3a0 100644 --- a/FIAT/barycentric_interpolation.py +++ b/FIAT/barycentric_interpolation.py @@ -53,6 +53,10 @@ def tabulate(self, n, pts): results = numpy.array(list(map(simplify, results))) return results + def _tabulate_derivatives(self, n, pts): + results = self.tabulate(n, pts) + return results, numpy.dot(self.dmat, results)[:, None, :] + def tabulate_derivatives(self, n, pts): return numpy.dot(self.dmat, self.tabulate(n, pts)) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 88f5ab120..85f60326c 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -12,9 +12,6 @@ import sympy from FIAT import reference_element from FIAT import jacobi -from FIAT.reference_element import UFCInterval -from FIAT.quadrature import GaussLegendreQuadratureLineRule -from FIAT.recursive_points import RecursivePointSet def morton_index2(p, q=0): @@ -33,9 +30,8 @@ def jrc(a, b, n): return an, bn, cn -def recurrence(dim, n, factors, phi, dfactors=None, dphi=None): +def recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): """Dubiner recurrence from (Kirby 2010)""" - skip_derivs = dphi is None if dim == 0: return elif dim == 1: @@ -47,16 +43,23 @@ def recurrence(dim, n, factors, phi, dfactors=None, dphi=None): else: raise ValueError("Invalid number of spatial dimensions") - f1, f2, f3, f4 = factors - if dfactors is not None: - df1, df2, df3, df4 = dfactors + skip_derivs = dphi is None + x, y, z = ref_pts + f0 = 0.5 * (y + z) + f1 = x + f0 + 1. + f2 = f0 ** 2 + if jacobian is not None: + dx, dy, dz = jacobian + df0 = 0.5 * (dy + dz) + df1 = dx + df0 + df2 = 2 * f0 * df0 # p = 1 icur = idx(0) inext = idx(1) phi[inext] = f1 if not skip_derivs: - dphi[inext] = df1 + dphi[inext] = 0. * dphi[icur] + df1 # general p by recurrence for p in range(1, n): iprev, icur = icur, inext @@ -66,44 +69,44 @@ def recurrence(dim, n, factors, phi, dfactors=None, dphi=None): phi[inext] = a * f1 * phi[icur] - b * f2 * phi[iprev] if skip_derivs: continue - dphi[inext] = (a * f1 * dphi[icur] - b * f2 * dphi[iprev] + - a * phi[icur] * df1 - b * phi[iprev] * df2) + dphi[inext] = (a * f1 * dphi[icur] + a * phi[icur] * df1 - + b * (f2 * dphi[iprev] + phi[iprev] * df2)) if dim < 2: return + f3 = 1. + y + f4 = 0.5 * (z - 1.) 
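
The aq, bq, cq coefficients drawn from jrc() a few lines below are the standard Jacobi three-term recurrence coefficients. A quick standalone check against eval_jacobi_batch (a sketch, assuming FIAT is importable; the choice a = 2, b = 0 is arbitrary):

    import numpy
    from FIAT.expansions import jrc
    from FIAT.jacobi import eval_jacobi_batch

    a, b, degree = 2.0, 0.0, 6
    x = numpy.linspace(-1.0, 1.0, 9)
    # rows of P are P_0^(a,b), ..., P_degree^(a,b) evaluated at the points x
    P = eval_jacobi_batch(a, b, degree, x[:, None])
    for n in range(1, degree):
        an, bn, cn = jrc(a, b, n)
        # P_{n+1} = (an*x + bn) * P_n - cn * P_{n-1}
        assert numpy.allclose(P[n + 1], (an * x + bn) * P[n] - cn * P[n - 1])
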
f5 = f4 ** 2 - if dfactors is not None: + if jacobian is not None: + df3 = dy + df4 = 0.5 * dz df5 = 2 * f4 * df4 for p in range(n): # q = 1 icur = idx(p, 0) inext = idx(p, 1) - g = (p + 1.5) * f3 - f4 - phi[inext] = g * phi[icur] + a = p + 1.5 + qcoef = a * f3 + f4 + phi[inext] = qcoef * phi[icur] if not skip_derivs: - dg = (p + 1.5) * df3 - df4 - dphi[inext] = g * dphi[icur] + phi[icur] * dg + dqcoef = a * df3 + df4 + dphi[inext] = qcoef * dphi[icur] + phi[icur] * dqcoef # general q by recurrence for q in range(1, n - p): iprev, icur = icur, inext inext = idx(p, q + 1) aq, bq, cq = jrc(2 * p + 1, 0, q) - g = aq * f3 + (bq - aq) * f4 - h = cq * f5 - phi[inext] = g * phi[icur] - h * phi[iprev] + qcoef = aq * f3 + (aq - bq) * f4 + phi[inext] = qcoef * phi[icur] - cq*(f5 * phi[iprev]) if skip_derivs: continue - dg = aq * df3 + (bq - aq) * df4 - dh = cq * df5 - dphi[inext] = g * dphi[icur] + phi[icur] * dg - h * dphi[iprev] - phi[iprev] * dh + dqcoef = aq * df3 + (aq - bq) * df4 + dphi[inext] = (qcoef * dphi[icur] + phi[icur] * dqcoef - + cq * (f5 * dphi[iprev] + phi[iprev] * df5)) if dim < 3: return - z = 1. - 2. * f4 - if dfactors: - dz = -2. * df4 - for p in range(n): for q in range(n - p): # r = 1 @@ -111,20 +114,20 @@ def recurrence(dim, n, factors, phi, dfactors=None, dphi=None): inext = idx(p, q, 1) a = 2.0 + p + q b = 1.0 + p + q - g = a * z + b - phi[inext] = g * phi[icur] + rcoef = a * z + b + phi[inext] = rcoef * phi[icur] if not skip_derivs: - dphi[inext] = g * dphi[icur] + a * phi[icur] * dz + dphi[inext] = rcoef * dphi[icur] + a * phi[icur] * dz # general r by recurrence for r in range(1, n - p - q): iprev, icur = icur, inext inext = idx(p, q, r + 1) ar, br, cr = jrc(2 * p + 2 * q + 2, 0, r) - g = ar * z + br - phi[inext] = g * phi[icur] - cr * phi[iprev] + rcoef = ar * z + br + phi[inext] = rcoef * phi[icur] - cr * phi[iprev] if skip_derivs: continue - dphi[inext] = g * dphi[icur] + ar * phi[icur] * dz - cr * dphi[iprev] + dphi[inext] = rcoef * dphi[icur] + ar * phi[icur] * dz - cr * dphi[iprev] def _tabulate_dpts(tabulator, D, n, order, pts): @@ -216,8 +219,6 @@ def xi_tetrahedron(eta): class ExpansionSet(object): - point_set = RecursivePointSet(lambda n: GaussLegendreQuadratureLineRule(UFCInterval(), n + 1).get_points()) - def __new__(cls, ref_el, *args, **kwargs): """Returns an ExpansionSet instance appopriate for the given reference element.""" @@ -260,6 +261,15 @@ def _mapping(self, pts): return [sum((self.A[i][j] * pts[j] for j in range(m2)), self.b[i]) for i in range(m1)] + def _pad_coordinates(self, ref_pts): + x, y, z = tuple(ref_pts) + (-1., )*(3 - len(ref_pts)) + return x, y, z + + def _pad_jacobian(self): + A = numpy.pad(self.A, [(0, 3 - self.A.shape[0]), (0, 0)]) + dx, dy, dz = tuple(row[:, None] for row in A) + return dx, dy, dz + def _tabulate(self, n, pts): """A version of tabulate() that also works for a single point. 
""" @@ -270,7 +280,7 @@ def _tabulate(self, n, pts): return results ref_pts = self._mapping(pts) - recurrence(dim, n, self._make_factors(ref_pts), results) + recurrence(dim, n, self._pad_coordinates(ref_pts), results) self._normalize(n, results) return results @@ -287,9 +297,8 @@ def _tabulate_derivatives(self, n, pts): return phi, dphi ref_pts = self._mapping(pts) - factors = self._make_factors(ref_pts) - dfactors = self._make_dfactors(ref_pts) - recurrence(dim, n, factors, phi, dfactors=dfactors, dphi=dphi) + recurrence(dim, n, self._pad_coordinates(ref_pts), phi, + jacobian=self._pad_jacobian(), dphi=dphi) self._normalize(n, phi) self._normalize(n, dphi) return phi, dphi @@ -330,8 +339,8 @@ def get_dmats(self, degree): pass if degree == 0: return cache.setdefault(key, numpy.zeros((self.ref_el.get_spatial_dimension(), 1, 1), "d")) - pts = self.point_set.recursive_points(self.ref_el.get_vertices(), degree) + pts = reference_element.make_lattice(self.ref_el.get_vertices(), degree, family="gl") v, dv = self._tabulate_derivatives(degree, numpy.transpose(pts)) dv = numpy.array(dv).transpose((1, 2, 0)) dmats = numpy.linalg.solve(numpy.transpose(v), dv) @@ -340,10 +349,21 @@ def get_dmats(self, degree): def _tabulate_jet(self, degree, pts, order=0): from FIAT.polynomial_set import mis result = {} - base_vals = self.tabulate(degree, pts) - dmats = self.get_dmats(degree) if order > 0 else [] - for i in range(order + 1): - alphas = mis(self.ref_el.get_spatial_dimension(), i) + D = self.ref_el.get_spatial_dimension() + if order == 0: + base_vals = self.tabulate(degree, pts) + else: + v, dv = self._tabulate_derivatives(degree, numpy.transpose(pts)) + base_vals = numpy.array(v) + dtildes = numpy.array(dv).transpose((1, 0, 2)) + alphas = mis(D, 1) + for alpha in alphas: + result[alpha] = next(dv for dv, ai in zip(dtildes, alpha) if ai > 0) + + # Only use dmats if order > 1 + dmats = self.get_dmats(degree) if order > 1 else [] + for i in range(0, order + 1): + alphas = mis(D, i) for alpha in alphas: beta = next((beta for beta in sorted(result, reverse=True) if all(bj <= aj for bj, aj in zip(beta, alpha))), (0,) * len(alpha)) @@ -385,13 +405,6 @@ def __init__(self, ref_el): raise Exception("Must have a line") super(LineExpansionSet, self).__init__(ref_el) - def _make_factors(self, ref_pts): - return ref_pts[0], 1., 0., 0. - - def _make_dfactors(self, ref_pts): - dx = ref_pts - ref_pts + self.A[0][:, None] - return dx, 0.*dx, 0.*dx, 0.*dx - def _normalize(self, n, phi): for p in range(n + 1): phi[p] *= math.sqrt(p + 0.5) @@ -439,25 +452,6 @@ def __init__(self, ref_el): raise Exception("Must have a triangle") super(TriangleExpansionSet, self).__init__(ref_el) - def _make_factors(self, ref_pts): - x = ref_pts[0] - y = ref_pts[1] - factor1 = 0.5 * (1. + 2. * x + y) - factor2 = (0.5 * (1. - y)) ** 2 - factor3 = 1. + y - factor4 = 1. - return factor1, factor2, factor3, factor4 - - def _make_dfactors(self, ref_pts): - y = ref_pts[1] - dx = ref_pts - ref_pts + self.A[0][:, None] - dy = ref_pts - ref_pts + self.A[1][:, None] - dfactor1 = dx + 0.5 * dy - dfactor2 = -0.5 * (1. - y) * dy - dfactor3 = dy - dfactor4 = 0. * dx - return dfactor1, dfactor2, dfactor3, dfactor4 - def _normalize(self, n, phi): idx = morton_index2 for p in range(n + 1): @@ -472,28 +466,6 @@ def __init__(self, ref_el): raise Exception("Must be a tetrahedron") super(TetrahedronExpansionSet, self).__init__(ref_el) - def _make_factors(self, ref_pts): - x = ref_pts[0] - y = ref_pts[1] - z = ref_pts[2] - factor1 = 0.5 * (2. + 2. 
* x + y + z) - factor2 = (0.5 * (y + z))**2 - factor3 = 1. + y - factor4 = 0.5 * (1. - z) - return factor1, factor2, factor3, factor4 - - def _make_dfactors(self, ref_pts): - y = ref_pts[1] - z = ref_pts[2] - dx = ref_pts - ref_pts + self.A[0][:, None] - dy = ref_pts - ref_pts + self.A[1][:, None] - dz = ref_pts - ref_pts + self.A[2][:, None] - dfactor1 = 0.5 * (2. * dx + dy + dz) - dfactor2 = 0.5 * (y + z) * (dy + dz) - dfactor3 = dy - dfactor4 = -0.5 * dz - return dfactor1, dfactor2, dfactor3, dfactor4 - def _normalize(self, n, phi): idx = morton_index3 for p in range(n + 1): diff --git a/FIAT/quadrature.py b/FIAT/quadrature.py index a93b73c1a..2843afadc 100644 --- a/FIAT/quadrature.py +++ b/FIAT/quadrature.py @@ -8,10 +8,10 @@ # Modified by David A. Ham (david.ham@imperial.ac.uk), 2015 import itertools -import math import numpy +from recursivenodes.quadrature import gaussjacobi -from FIAT import reference_element, expansions, jacobi, orthopoly +from FIAT import reference_element, expansions, orthopoly class QuadratureRule(object): @@ -42,8 +42,8 @@ class GaussJacobiQuadratureLineRule(QuadratureRule): def __init__(self, ref_el, m): # this gives roots on the default (-1,1) reference element - # (xs_ref, ws_ref) = compute_gauss_jacobi_rule(a, b, m) - (xs_ref, ws_ref) = compute_gauss_jacobi_rule(0., 0., m) + # (xs_ref, ws_ref) = gaussjacobi(m, a, b) + (xs_ref, ws_ref) = gaussjacobi(m, 0., 0.) Ref1 = reference_element.DefaultLine() A, b = reference_element.make_affine_mapping(Ref1.get_vertices(), @@ -186,8 +186,8 @@ class CollapsedQuadratureTriangleRule(QuadratureRule): from the square to the triangle.""" def __init__(self, ref_el, m): - ptx, wx = compute_gauss_jacobi_rule(0., 0., m) - pty, wy = compute_gauss_jacobi_rule(1., 0., m) + ptx, wx = gaussjacobi(m, 0., 0.) + pty, wy = gaussjacobi(m, 1., 0.) # map ptx , pty pts_ref = [expansions.xi_triangle((x, y)) @@ -213,9 +213,9 @@ class CollapsedQuadratureTetrahedronRule(QuadratureRule): from the cube to the tetrahedron.""" def __init__(self, ref_el, m): - ptx, wx = compute_gauss_jacobi_rule(0., 0., m) - pty, wy = compute_gauss_jacobi_rule(1., 0., m) - ptz, wz = compute_gauss_jacobi_rule(2., 0., m) + ptx, wx = gaussjacobi(m, 0., 0.) + pty, wy = gaussjacobi(m, 1., 0.) + ptz, wz = gaussjacobi(m, 2., 0.) # map ptx , pty pts_ref = [expansions.xi_tetrahedron((x, y, z)) @@ -319,53 +319,3 @@ def make_tensor_product_quadrature(*quad_rules): wts = [numpy.prod(wt_tuple) for wt_tuple in itertools.product(*[q.wts for q in quad_rules])] return QuadratureRule(ref_el, pts, wts) - - -# rule to get Gauss-Jacobi points -def compute_gauss_jacobi_points(a, b, m): - """Computes the m roots of P_{m}^{a,b} on [-1,1] by Newton's method. - The initial guesses are the Chebyshev points. 
Algorithm - implemented in Python from the pseudocode given by Karniadakis and - Sherwin""" - x = [] - eps = 1.e-8 - max_iter = 100 - for k in range(0, m): - r = -math.cos((2.0 * k + 1.0) * math.pi / (2.0 * m)) - if k > 0: - r = 0.5 * (r + x[k - 1]) - j = 0 - delta = 2 * eps - while j < max_iter: - s = 0 - for i in range(0, k): - s = s + 1.0 / (r - x[i]) - f = jacobi.eval_jacobi(a, b, m, r) - fp = jacobi.eval_jacobi_deriv(a, b, m, r) - delta = f / (fp - f * s) - - r = r - delta - - if math.fabs(delta) < eps: - break - else: - j = j + 1 - - x.append(r) - return x - - -def compute_gauss_jacobi_rule(a, b, m): - xs = compute_gauss_jacobi_points(a, b, m) - - a1 = math.pow(2, a + b + 1) - a2 = math.gamma(a + m + 1) - a3 = math.gamma(b + m + 1) - a4 = math.gamma(a + b + m + 1) - a5 = math.factorial(m) - a6 = a1 * a2 * a3 / a4 / a5 - - ws = [a6 / (1.0 - x**2.0) / jacobi.eval_jacobi_deriv(a, b, m, x)**2.0 - for x in xs] - - return xs, ws diff --git a/test/unit/test_gauss_legendre.py b/test/unit/test_gauss_legendre.py index 5b397396e..09e96ceca 100644 --- a/test/unit/test_gauss_legendre.py +++ b/test/unit/test_gauss_legendre.py @@ -24,14 +24,14 @@ def symmetric_simplex(dim): - from FIAT import ufc_simplex - s = ufc_simplex(dim) - r = lambda x: x ** 0.5 + from FIAT.reference_element import default_simplex + s = default_simplex(dim) if dim == 2: - s.vertices = [(0.0, 0.0), (-1.0, -r(3.0)), (1.0, -r(3.0))] + h = 3.**0.5 / dim + s.vertices = [(0., 1.), (-h, -0.5), (h, -0.5)] elif dim == 3: - s.vertices = [(r(3.0)/3, 0.0, 0.0), (-r(3.0)/6, 0.5, 0.0), - (-r(3.0)/6, -0.5, 0.0), (0.0, 0.0, r(6.0)/3)] + h = 3.**0.5 / dim + s.vertices = [(-h, h, h), (h, -h, h), (h, h, -h), (h, h, h)] return s @@ -79,7 +79,49 @@ def test_symmetry(dim, degree): transform = s.get_entity_transform(1, entity) assert np.allclose(points[edge_dofs[entity]], np.array(list(map(transform, quadrature_points)))) - # TODO add rotational symmetry tests + +@pytest.mark.parametrize("dim, degree", [(1, 128), (2, 64), (3, 16)]) +def test_interpolation(dim, degree): + from FIAT import GaussLobattoLegendre, quadrature + from FIAT.polynomial_set import mis + + a = (1. 
+ 0.5) + a = 0.5 * a**2 + r2 = lambda x: 0.5 * np.linalg.norm(x, axis=-1)**2 + f = lambda x: np.exp(a / (r2(x) - a)) + df = lambda x: f(x) * (-a*(r2(x) - a)**-2) + + s = symmetric_simplex(dim) + rule = quadrature.make_quadrature(s, degree + 1) + points = rule.get_points() + weights = rule.get_weights() + + f_at_pts = {} + f_at_pts[(0,) * dim] = f(points) + df_at_pts = df(points) * points.T + alphas = mis(dim, 1) + for alpha in alphas: + i = next(j for j, aj in enumerate(alpha) if aj > 0) + f_at_pts[alpha] = df_at_pts[i] + + print() + scaleL2 = 1 / np.sqrt(np.dot(weights, f(points)**2)) + scaleH1 = 1 / np.sqrt(sum(np.dot(weights, f_at_pts[alpha]**2) for alpha in f_at_pts)) + + k = 1 + while k <= degree: + fe = GaussLobattoLegendre(s, k) + tab = fe.tabulate(1, points) + coefficients = np.array([v(f) for v in fe.dual_basis()]) + + alpha = (0,) * dim + err = f_at_pts[alpha] - np.dot(coefficients, tab[alpha]) + errorL2 = scaleL2 * np.sqrt(np.dot(weights, err**2)) + + err2 = sum((f_at_pts[alpha] - np.dot(coefficients, tab[alpha])) ** 2 for alpha in tab) + errorH1 = scaleH1 * np.sqrt(np.dot(weights, err2)) + print("dim = %d, degree = %2d, L2-error = %.4E, H1-error = %.4E" % (dim, k, errorL2, errorH1)) + k = min(k * 2, k + 16) if __name__ == '__main__': diff --git a/test/unit/test_gauss_lobatto_legendre.py b/test/unit/test_gauss_lobatto_legendre.py index 6790c2a0e..ed295b893 100644 --- a/test/unit/test_gauss_lobatto_legendre.py +++ b/test/unit/test_gauss_lobatto_legendre.py @@ -24,14 +24,14 @@ def symmetric_simplex(dim): - from FIAT import ufc_simplex - s = ufc_simplex(dim) - h = 0.5 * (3.**0.5) + from FIAT.reference_element import default_simplex + s = default_simplex(dim) if dim == 2: + h = 3.**0.5 / dim s.vertices = [(0., 1.), (-h, -0.5), (h, -0.5)] elif dim == 3: - s.vertices = [(-h, -h, -h), (h, -h, -h), - (-h, h, -h), (-h, -h, h)] + h = 3.**0.5 / dim + s.vertices = [(-h, h, h), (h, -h, h), (h, h, -h), (h, h, h)] return s @@ -80,30 +80,35 @@ def test_symmetry(dim, degree): transform = s.get_entity_transform(1, entity) assert np.allclose(points[edge_dofs[entity]], np.array(list(map(transform, quadrature_points)))) - # TODO add rotational symmetry tests on each facet - -@pytest.mark.parametrize("dim, degree", [(1, 64), (2, 64), (3, 16)]) +@pytest.mark.parametrize("dim, degree", [(1, 128), (2, 64), (3, 16)]) def test_interpolation(dim, degree): - from FIAT import GaussLobattoLegendre, quadrature, reference_element + from FIAT import GaussLobattoLegendre, quadrature + from FIAT.polynomial_set import mis - alphas = [tuple(row) for row in np.eye(dim, dtype=int)] - a = np.pi - f = lambda x: np.cos(a * sum(x)) - df = lambda x: -a * np.sin(a * sum(x)) + a = (1. 
+ 0.5) + a = 0.5 * a**2 + r2 = lambda x: 0.5 * np.linalg.norm(x, axis=-1)**2 + f = lambda x: np.exp(a / (r2(x) - a)) + df = lambda x: f(x) * (-a*(r2(x) - a)**-2) s = symmetric_simplex(dim) - # s = reference_element.default_simplex(dim) rule = quadrature.make_quadrature(s, degree + 1) points = rule.get_points() weights = rule.get_weights() f_at_pts = {} - f_at_pts[(0,)*dim] = np.array(list(map(f, points))) + f_at_pts[(0,) * dim] = f(points) + df_at_pts = df(points) * points.T + alphas = mis(dim, 1) for alpha in alphas: - f_at_pts[alpha] = np.array(list(map(df, points))) + i = next(j for j, aj in enumerate(alpha) if aj > 0) + f_at_pts[alpha] = df_at_pts[i] print() + scaleL2 = 1 / np.sqrt(np.dot(weights, f(points)**2)) + scaleH1 = 1 / np.sqrt(sum(np.dot(weights, f_at_pts[alpha]**2) for alpha in f_at_pts)) + k = 1 while k <= degree: fe = GaussLobattoLegendre(s, k) @@ -111,11 +116,13 @@ def test_interpolation(dim, degree): coefficients = np.array([v(f) for v in fe.dual_basis()]) alpha = (0,) * dim - errorL2 = np.sqrt(np.dot(weights, (f_at_pts[alpha] - np.dot(coefficients, tab[alpha])) ** 2)) + err = f_at_pts[alpha] - np.dot(coefficients, tab[alpha]) + errorL2 = scaleL2 * np.sqrt(np.dot(weights, err**2)) + err2 = sum((f_at_pts[alpha] - np.dot(coefficients, tab[alpha])) ** 2 for alpha in tab) - errorH1 = np.sqrt(np.dot(weights, err2)) + errorH1 = scaleH1 * np.sqrt(np.dot(weights, err2)) print("dim = %d, degree = %2d, L2-error = %.4E, H1-error = %.4E" % (dim, k, errorL2, errorH1)) - k *= 2 + k = min(k * 2, k + 16) if __name__ == '__main__': From de881f573c6932fb5f681918ed836466b910a409 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Fri, 3 Nov 2023 16:58:31 +0000 Subject: [PATCH 34/76] Fix some broken tests --- FIAT/expansions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 85f60326c..1a973ebc0 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -350,7 +350,7 @@ def _tabulate_jet(self, degree, pts, order=0): from FIAT.polynomial_set import mis result = {} D = self.ref_el.get_spatial_dimension() - if order == 0: + if order == 0 or degree == 0 or D == 0: base_vals = self.tabulate(degree, pts) else: v, dv = self._tabulate_derivatives(degree, numpy.transpose(pts)) From efd41320b3171e01460edbbca2b323fe18a90170 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Sat, 4 Nov 2023 09:06:04 +0000 Subject: [PATCH 35/76] Test with python 3.8-3.9 --- .github/workflows/pythonapp.yml | 2 +- test/unit/test_gauss_legendre.py | 2 +- test/unit/test_gauss_lobatto_legendre.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml index 53c7aac61..39dd4bcc9 100644 --- a/.github/workflows/pythonapp.yml +++ b/.github/workflows/pythonapp.yml @@ -12,7 +12,7 @@ jobs: strategy: matrix: os: [ubuntu-latest, macos-latest] - python-version: [3.7, 3.8] + python-version: [3.8, 3.9] steps: - uses: actions/checkout@v2 diff --git a/test/unit/test_gauss_legendre.py b/test/unit/test_gauss_legendre.py index 09e96ceca..bc96b6c67 100644 --- a/test/unit/test_gauss_legendre.py +++ b/test/unit/test_gauss_legendre.py @@ -116,7 +116,7 @@ def test_interpolation(dim, degree): alpha = (0,) * dim err = f_at_pts[alpha] - np.dot(coefficients, tab[alpha]) - errorL2 = scaleL2 * np.sqrt(np.dot(weights, err**2)) + errorL2 = scaleL2 * np.sqrt(np.dot(weights, err ** 2)) err2 = sum((f_at_pts[alpha] - np.dot(coefficients, tab[alpha])) ** 2 for alpha in tab) errorH1 = scaleH1 * np.sqrt(np.dot(weights, 
err2)) diff --git a/test/unit/test_gauss_lobatto_legendre.py b/test/unit/test_gauss_lobatto_legendre.py index ed295b893..7927125ee 100644 --- a/test/unit/test_gauss_lobatto_legendre.py +++ b/test/unit/test_gauss_lobatto_legendre.py @@ -117,7 +117,7 @@ def test_interpolation(dim, degree): alpha = (0,) * dim err = f_at_pts[alpha] - np.dot(coefficients, tab[alpha]) - errorL2 = scaleL2 * np.sqrt(np.dot(weights, err**2)) + errorL2 = scaleL2 * np.sqrt(np.dot(weights, err ** 2)) err2 = sum((f_at_pts[alpha] - np.dot(coefficients, tab[alpha])) ** 2 for alpha in tab) errorH1 = scaleH1 * np.sqrt(np.dot(weights, err2)) From 897f1d417ea83fae770fa4722bf954df10b4695c Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Sat, 4 Nov 2023 09:13:04 +0000 Subject: [PATCH 36/76] add recursivenodes as a dependency --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index b4cb69674..a8664f43c 100755 --- a/setup.py +++ b/setup.py @@ -27,4 +27,4 @@ download_url=tarball, license="LGPL v3 or later", packages=["FIAT"], - install_requires=["numpy", "sympy"]) + install_requires=["numpy", "sympy", "recursivenode"]) From 26e2db48c21bb582be2187c6e5178ac96e82fc2c Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Sat, 4 Nov 2023 09:21:56 +0000 Subject: [PATCH 37/76] typo --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index a8664f43c..fa5a47dc4 100755 --- a/setup.py +++ b/setup.py @@ -27,4 +27,4 @@ download_url=tarball, license="LGPL v3 or later", packages=["FIAT"], - install_requires=["numpy", "sympy", "recursivenode"]) + install_requires=["numpy", "sympy", "recursivenodes"]) From 5f60e9b91747c500c92451204c823d14c6b5af58 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Sat, 4 Nov 2023 09:33:01 +0000 Subject: [PATCH 38/76] fix tests --- test/unit/test_gauss_legendre.py | 15 +++++++++------ test/unit/test_gauss_lobatto_legendre.py | 15 +++++++++------ 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/test/unit/test_gauss_legendre.py b/test/unit/test_gauss_legendre.py index bc96b6c67..ef39f75fb 100644 --- a/test/unit/test_gauss_legendre.py +++ b/test/unit/test_gauss_legendre.py @@ -24,9 +24,11 @@ def symmetric_simplex(dim): - from FIAT.reference_element import default_simplex - s = default_simplex(dim) - if dim == 2: + from FIAT.reference_element import ufc_simplex + s = ufc_simplex(dim) + if dim == 1: + s.vertices = [(-1.,), (1.,)] + elif dim == 2: h = 3.**0.5 / dim s.vertices = [(0., 1.), (-h, -0.5), (h, -0.5)] elif dim == 3: @@ -57,7 +59,7 @@ def test_gl_basis_values(dim, degree): @pytest.mark.parametrize("dim, degree", [(1, 4), (2, 4), (3, 4)]) def test_symmetry(dim, degree): """ Ensure the dual basis has the right symmetry.""" - from FIAT import GaussLegendre, quadrature, expansions, ufc_simplex + from FIAT import GaussLegendre, quadrature, expansions s = symmetric_simplex(dim) fe = GaussLegendre(s, degree) @@ -69,7 +71,8 @@ def test_symmetry(dim, degree): points[i, :], = node.get_point_dict().keys() # Test that edge DOFs are located at the GL quadrature points - lr = quadrature.GaussLegendreQuadratureLineRule(ufc_simplex(1), degree + 1) + line = s if dim == 1 else s.construct_subelement(1) + lr = quadrature.GaussLegendreQuadratureLineRule(line, degree + 1) quadrature_points = lr.pts entity_dofs = fe.entity_dofs() @@ -106,7 +109,7 @@ def test_interpolation(dim, degree): print() scaleL2 = 1 / np.sqrt(np.dot(weights, f(points)**2)) - scaleH1 = 1 / np.sqrt(sum(np.dot(weights, f_at_pts[alpha]**2) for alpha in 
f_at_pts)) + scaleH1 = 1 / np.sqrt(np.dot(weights, sum(f_at_pts[alpha]**2 for alpha in f_at_pts))) k = 1 while k <= degree: diff --git a/test/unit/test_gauss_lobatto_legendre.py b/test/unit/test_gauss_lobatto_legendre.py index 7927125ee..932934963 100644 --- a/test/unit/test_gauss_lobatto_legendre.py +++ b/test/unit/test_gauss_lobatto_legendre.py @@ -24,9 +24,11 @@ def symmetric_simplex(dim): - from FIAT.reference_element import default_simplex - s = default_simplex(dim) - if dim == 2: + from FIAT.reference_element import ufc_simplex + s = ufc_simplex(dim) + if dim == 1: + s.vertices = [(-1.,), (1.,)] + elif dim == 2: h = 3.**0.5 / dim s.vertices = [(0., 1.), (-h, -0.5), (h, -0.5)] elif dim == 3: @@ -57,7 +59,7 @@ def test_gll_basis_values(dim, degree): @pytest.mark.parametrize("dim, degree", [(1, 4), (2, 4), (3, 4)]) def test_symmetry(dim, degree): """ Ensure the dual basis has the right symmetry.""" - from FIAT import GaussLobattoLegendre, quadrature, expansions, ufc_simplex + from FIAT import GaussLobattoLegendre, quadrature, expansions s = symmetric_simplex(dim) fe = GaussLobattoLegendre(s, degree) @@ -69,7 +71,8 @@ def test_symmetry(dim, degree): points[i, :], = node.get_point_dict().keys() # Test that edge DOFs are located at the GLL quadrature points - lr = quadrature.GaussLobattoLegendreQuadratureLineRule(ufc_simplex(1), degree + 1) + line = s if dim == 1 else s.construct_subelement(1) + lr = quadrature.GaussLobattoLegendreQuadratureLineRule(line, degree + 1) # Edge DOFs are ordered with the two vertex DOFs followed by the interior DOFs quadrature_points = lr.pts[::degree] + lr.pts[1:-1] @@ -107,7 +110,7 @@ def test_interpolation(dim, degree): print() scaleL2 = 1 / np.sqrt(np.dot(weights, f(points)**2)) - scaleH1 = 1 / np.sqrt(sum(np.dot(weights, f_at_pts[alpha]**2) for alpha in f_at_pts)) + scaleH1 = 1 / np.sqrt(np.dot(weights, sum(f_at_pts[alpha]**2 for alpha in f_at_pts))) k = 1 while k <= degree: From a5be0976e0feb0c57cc8303bbae2e3ee19634626 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Sat, 4 Nov 2023 12:35:06 +0000 Subject: [PATCH 39/76] tabulate_derivatives without _tabulate_dpts --- FIAT/expansions.py | 95 +++++++++++++++++++--------------------------- 1 file changed, 40 insertions(+), 55 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 1a973ebc0..9d5e18f79 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -30,9 +30,20 @@ def jrc(a, b, n): return an, bn, cn +def pad_coordinates(ref_pts, embedded_dim): + """Pad reference coordinates by appending -1.0.""" + return tuple(ref_pts) + (-1.0, )*(embedded_dim - len(ref_pts)) + + +def pad_jacobian(A, embedded_dim): + """Pad coordinate mapping Jacobian by appending zero rows.""" + A = numpy.pad(A, [(0, embedded_dim - A.shape[0]), (0, 0)]) + return tuple(row[:, None] for row in A) + + def recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): """Dubiner recurrence from (Kirby 2010)""" - if dim == 0: + if dim == 0 or n == 0: return elif dim == 1: idx = lambda p: p @@ -44,12 +55,12 @@ def recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): raise ValueError("Invalid number of spatial dimensions") skip_derivs = dphi is None - x, y, z = ref_pts + x, y, z = pad_coordinates(ref_pts, 3) f0 = 0.5 * (y + z) f1 = x + f0 + 1. 
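
For context on the two padding helpers introduced in this patch: pad_coordinates appends the constant -1.0 for each missing reference coordinate and pad_jacobian appends zero rows to the affine Jacobian, so the recurrence can always be written in terms of three collapsed coordinates x, y, z. A minimal standalone sketch (NumPy only; the points and the 2x2 Jacobian are made-up illustrative values, not FIAT's):

    import numpy

    def pad_coordinates(ref_pts, embedded_dim):
        # Append -1.0 for each missing reference coordinate, as in the patch above.
        return tuple(ref_pts) + (-1.0, )*(embedded_dim - len(ref_pts))

    def pad_jacobian(A, embedded_dim):
        # Append zero rows, so derivatives in the padded directions vanish.
        A = numpy.pad(A, [(0, embedded_dim - A.shape[0]), (0, 0)])
        return tuple(row[:, None] for row in A)

    pts = (numpy.array([-0.5, 0.0]), numpy.array([-0.25, -1.0]))  # two 2D points
    x, y, z = pad_coordinates(pts, 3)
    assert z == -1.0                            # padded coordinate is the constant -1

    A = numpy.array([[2.0, 0.0], [0.0, 2.0]])   # made-up 2x2 affine Jacobian
    dx, dy, dz = pad_jacobian(A, 3)
    assert dz.shape == (2, 1) and numpy.all(dz == 0.0)
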
f2 = f0 ** 2 if jacobian is not None: - dx, dy, dz = jacobian + dx, dy, dz = pad_jacobian(jacobian, 3) df0 = 0.5 * (dy + dz) df1 = dx + df0 df2 = 2 * f0 * df0 @@ -69,7 +80,7 @@ def recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): phi[inext] = a * f1 * phi[icur] - b * f2 * phi[iprev] if skip_derivs: continue - dphi[inext] = (a * f1 * dphi[icur] + a * phi[icur] * df1 - + dphi[inext] = (a * (f1 * dphi[icur] + phi[icur] * df1) - b * (f2 * dphi[iprev] + phi[iprev] * df2)) if dim < 2: return @@ -261,44 +272,35 @@ def _mapping(self, pts): return [sum((self.A[i][j] * pts[j] for j in range(m2)), self.b[i]) for i in range(m1)] - def _pad_coordinates(self, ref_pts): - x, y, z = tuple(ref_pts) + (-1., )*(3 - len(ref_pts)) - return x, y, z - - def _pad_jacobian(self): - A = numpy.pad(self.A, [(0, 3 - self.A.shape[0]), (0, 0)]) - dx, dy, dz = tuple(row[:, None] for row in A) - return dx, dy, dz - def _tabulate(self, n, pts): """A version of tabulate() that also works for a single point. """ - dim = self.ref_el.get_spatial_dimension() + D = self.ref_el.get_spatial_dimension() results = [None] * self.get_num_members(n) - results[0] = sum((pts[i] - pts[i] for i in range(dim)), 1.) + results[0] = sum((pts[i] - pts[i] for i in range(D)), 1.) if n == 0: return results ref_pts = self._mapping(pts) - recurrence(dim, n, self._pad_coordinates(ref_pts), results) + recurrence(D, n, ref_pts, results) self._normalize(n, results) return results def _tabulate_derivatives(self, n, pts): """A version of tabulate_derivatives() that also works for a single point. """ - dim = self.ref_el.get_spatial_dimension() + D = self.ref_el.get_spatial_dimension() num_members = self.get_num_members(n) phi = [None] * num_members dphi = [None] * num_members - phi[0] = sum((pts[i] - pts[i] for i in range(dim)), 1.) + phi[0] = sum((pts[i] - pts[i] for i in range(D)), 1.) dphi[0] = pts - pts if n == 0: return phi, dphi ref_pts = self._mapping(pts) - recurrence(dim, n, self._pad_coordinates(ref_pts), phi, - jacobian=self._pad_jacobian(), dphi=dphi) + recurrence(D, n, ref_pts, phi, + jacobian=self.A, dphi=dphi) self._normalize(n, phi) self._normalize(n, dphi) return phi, dphi @@ -306,21 +308,16 @@ def _tabulate_derivatives(self, n, pts): def tabulate(self, n, pts): if len(pts) == 0: return numpy.array([]) - else: - return numpy.array(self._tabulate(n, numpy.array(pts).T)) + return numpy.array(self._tabulate(n, numpy.transpose(pts))) def tabulate_derivatives(self, n, pts): - order = 1 D = self.ref_el.get_spatial_dimension() - data = _tabulate_dpts(self._tabulate, D, n, order, numpy.array(pts)) - # Put data in the required data structure, i.e., - # k-tuples which contain the value, and the k-1 derivatives - # (gradient, Hessian, ...) - m, n = data[0].shape - data2 = [[tuple([data[r][i][j] for r in range(order+1)]) - for j in range(n)] - for i in range(m)] - return data2 + vals, deriv_vals = self._tabulate_derivatives(n, numpy.transpose(pts)) + # Create the ordinary data structure. 
+ data = [[(vals[i][j], [deriv_vals[i][r][j] for r in range(D)]) + for j in range(len(vals[0]))] + for i in range(len(vals))] + return data def tabulate_jet(self, n, pts, order=1): D = self.ref_el.get_spatial_dimension() @@ -409,39 +406,27 @@ def _normalize(self, n, phi): for p in range(n + 1): phi[p] *= math.sqrt(p + 0.5) - def tabulate(self, n, pts): + def _tabulate(self, n, pts): """Returns a numpy array A[i,j] = phi_i(pts[j])""" if len(pts) > 0: - ref_pts = numpy.array([self.mapping(pt) for pt in pts]) + ref_pts = self._mapping(pts).T results = jacobi.eval_jacobi_batch(0, 0, n, ref_pts) self._normalize(n, results) return results else: return [] - def tabulate_derivatives(self, n, pts): - """Returns a tuple of length one (A,) such that - A[i,j] = D phi_i(pts[j]). The tuple is returned for - compatibility with the interfaces of the triangle and - tetrahedron expansions.""" - ref_pts = numpy.array([self.mapping(pt) for pt in pts]) - results = jacobi.eval_jacobi_deriv_batch(0, 0, n, ref_pts) + def _tabulate_derivatives(self, n, pts): + """Returns a tuple of (vals, derivs) such that + vals[i,j] = phi_i(pts[j]), derivs[i,j] = D vals[i,j].""" + ref_pts = self._mapping(pts).T + derivs = jacobi.eval_jacobi_deriv_batch(0, 0, n, ref_pts) # Jacobi polynomials defined on [-1, 1], first derivatives need scaling - results *= 2.0 / self.ref_el.volume() - self._normalize(n, results) - - vals = self.tabulate(n, pts) - deriv_vals = (results,) - - # Create the ordinary data structure. - dv = [] - for i in range(vals.shape[0]): - dv.append([]) - for j in range(vals.shape[1]): - dv[-1].append((vals[i][j], [deriv_vals[0][i][j]])) - - return dv + derivs *= 2.0 / self.ref_el.volume() + self._normalize(n, derivs) + vals = self._tabulate(n, pts) + return vals, derivs[:, None, :] class TriangleExpansionSet(ExpansionSet): From 7bbb3afc5baa68fe1c018490a17d44fdc13f78a9 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Sun, 5 Nov 2023 11:35:42 +0000 Subject: [PATCH 40/76] Fix tabulate_jet --- FIAT/expansions.py | 21 ++++++++++++++------- FIAT/polynomial_set.py | 10 +++++++--- 2 files changed, 21 insertions(+), 10 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 9d5e18f79..86ac1e5dd 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -347,7 +347,7 @@ def _tabulate_jet(self, degree, pts, order=0): from FIAT.polynomial_set import mis result = {} D = self.ref_el.get_spatial_dimension() - if order == 0 or degree == 0 or D == 0: + if order == 0: base_vals = self.tabulate(degree, pts) else: v, dv = self._tabulate_derivatives(degree, numpy.transpose(pts)) @@ -357,15 +357,22 @@ def _tabulate_jet(self, degree, pts, order=0): for alpha in alphas: result[alpha] = next(dv for dv, ai in zip(dtildes, alpha) if ai > 0) + def distance(alpha, beta): + return sum(ai != bi for ai, bi in zip(alpha, beta)) + + base_alpha = (0,) * D # Only use dmats if order > 1 dmats = self.get_dmats(degree) if order > 1 else [] - for i in range(0, order + 1): + for i in range(order + 1): alphas = mis(D, i) for alpha in alphas: - beta = next((beta for beta in sorted(result, reverse=True) - if all(bj <= aj for bj, aj in zip(beta, alpha))), (0,) * len(alpha)) - vals = base_vals if sum(beta) == 0 else result[beta] - for dmat, start, end in zip(dmats, beta, alpha): + if alpha in result: + continue + if len(result) > 0 and i > 0: + base_alpha = next(a for a in result if sum(a) == i-1 and distance(alpha, a) == 1) + base_vals = result[base_alpha] + vals = base_vals + for dmat, start, end in zip(dmats, base_alpha, alpha): for j in 
range(start, end): vals = numpy.dot(dmat.T, vals) result[alpha] = vals @@ -445,7 +452,7 @@ def _normalize(self, n, phi): class TetrahedronExpansionSet(ExpansionSet): - """Collapsed orthonormal polynomial expanion on a tetrahedron.""" + """Collapsed orthonormal polynomial expansion on a tetrahedron.""" def __init__(self, ref_el): if ref_el.get_spatial_dimension() != 3: raise Exception("Must be a tetrahedron") diff --git a/FIAT/polynomial_set.py b/FIAT/polynomial_set.py index 8ce1204d0..2fb247f12 100644 --- a/FIAT/polynomial_set.py +++ b/FIAT/polynomial_set.py @@ -69,9 +69,13 @@ def tabulate_new(self, pts): def tabulate(self, pts, jet_order=0): """Returns the values of the polynomial set.""" - result = self.expansion_set._tabulate_jet(self.embedded_degree, pts, order=jet_order) - for alpha in result: - result[alpha] = numpy.dot(self.coeffs, result[alpha]) + base_vals = self.expansion_set._tabulate_jet(self.embedded_degree, pts, order=jet_order) + D = self.ref_el.get_spatial_dimension() + result = {} + for i in range(jet_order + 1): + alphas = mis(D, i) + for alpha in alphas: + result[alpha] = numpy.dot(self.coeffs, base_vals[alpha]) return result def get_expansion_set(self): From 2eae684896149710319c9024f34c3c7642e18dca Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Sun, 5 Nov 2023 14:15:10 +0000 Subject: [PATCH 41/76] normalize inside recurrence() --- FIAT/expansions.py | 53 ++++++++++++++++++++++------------------------ 1 file changed, 25 insertions(+), 28 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 86ac1e5dd..b108c40dd 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -55,6 +55,7 @@ def recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): raise ValueError("Invalid number of spatial dimensions") skip_derivs = dphi is None + results = (phi, ) if skip_derivs else (phi, dphi) x, y, z = pad_coordinates(ref_pts, 3) f0 = 0.5 * (y + z) f1 = x + f0 + 1. 
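
In one dimension, the scaling moved into recurrence() by this patch is the classical factor sqrt(p + 1/2) that makes the Legendre polynomials orthonormal on [-1, 1]. A self-contained check of that 1D case (NumPy only, using the standard Legendre three-term recurrence rather than FIAT's jrc coefficients):

    import numpy as np

    def scaled_legendre(n, x):
        # (p + 1) P_{p+1} = (2p + 1) x P_p - p P_{p-1}, then scale by sqrt(p + 1/2),
        # the same "normalize in p" step that this patch folds into recurrence().
        phi = np.zeros((n + 1, len(x)))
        phi[0] = 1.0
        if n > 0:
            phi[1] = x
        for p in range(1, n):
            phi[p + 1] = ((2*p + 1) * x * phi[p] - p * phi[p - 1]) / (p + 1)
        for p in range(n + 1):
            phi[p] *= np.sqrt(p + 0.5)
        return phi

    n = 6
    x, w = np.polynomial.legendre.leggauss(n + 1)   # exact for degree 2n + 1
    phi = scaled_legendre(n, x)
    mass = phi @ (w[:, None] * phi.T)
    assert np.allclose(mass, np.eye(n + 1))         # orthonormal on [-1, 1]
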
@@ -65,7 +66,7 @@ def recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): df1 = dx + df0 df2 = 2 * f0 * df0 - # p = 1 + # handle p = 1 icur = idx(0) inext = idx(1) phi[inext] = f1 @@ -82,6 +83,10 @@ def recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): continue dphi[inext] = (a * (f1 * dphi[icur] + phi[icur] * df1) - b * (f2 * dphi[iprev] + phi[iprev] * df2)) + # normalize in p + for result in results: + for p in range(n + 1): + result[idx(p)] *= math.sqrt(p + 0.5) if dim < 2: return @@ -94,7 +99,7 @@ def recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): df5 = 2 * f4 * df4 for p in range(n): - # q = 1 + # handle q = 1 icur = idx(p, 0) inext = idx(p, 1) a = p + 1.5 @@ -115,12 +120,17 @@ def recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): dqcoef = aq * df3 + (aq - bq) * df4 dphi[inext] = (qcoef * dphi[icur] + phi[icur] * dqcoef - cq * (f5 * dphi[iprev] + phi[iprev] * df5)) + # normalize in p + q + for result in results: + for p in range(n + 1): + for q in range(n - p + 1): + result[idx(p, q)] *= math.sqrt(p + q + 1.0) if dim < 3: return for p in range(n): for q in range(n - p): - # r = 1 + # handle r = 1 icur = idx(p, q, 0) inext = idx(p, q, 1) a = 2.0 + p + q @@ -139,6 +149,12 @@ def recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): if skip_derivs: continue dphi[inext] = rcoef * dphi[icur] + ar * phi[icur] * dz - cr * dphi[iprev] + # normalize in p + q + r + for result in results: + for p in range(n + 1): + for q in range(n - p + 1): + for r in range(n - p - q + 1): + result[idx(p, q, r)] *= math.sqrt(p + q + r + 1.5) def _tabulate_dpts(tabulator, D, n, order, pts): @@ -283,7 +299,6 @@ def _tabulate(self, n, pts): ref_pts = self._mapping(pts) recurrence(D, n, ref_pts, results) - self._normalize(n, results) return results def _tabulate_derivatives(self, n, pts): @@ -301,8 +316,6 @@ def _tabulate_derivatives(self, n, pts): ref_pts = self._mapping(pts) recurrence(D, n, ref_pts, phi, jacobian=self.A, dphi=dphi) - self._normalize(n, phi) - self._normalize(n, dphi) return phi, dphi def tabulate(self, n, pts): @@ -360,9 +373,9 @@ def _tabulate_jet(self, degree, pts, order=0): def distance(alpha, beta): return sum(ai != bi for ai, bi in zip(alpha, beta)) - base_alpha = (0,) * D # Only use dmats if order > 1 dmats = self.get_dmats(degree) if order > 1 else [] + base_alpha = (0,) * D for i in range(order + 1): alphas = mis(D, i) for alpha in alphas: @@ -409,16 +422,13 @@ def __init__(self, ref_el): raise Exception("Must have a line") super(LineExpansionSet, self).__init__(ref_el) - def _normalize(self, n, phi): - for p in range(n + 1): - phi[p] *= math.sqrt(p + 0.5) - def _tabulate(self, n, pts): """Returns a numpy array A[i,j] = phi_i(pts[j])""" if len(pts) > 0: ref_pts = self._mapping(pts).T results = jacobi.eval_jacobi_batch(0, 0, n, ref_pts) - self._normalize(n, results) + for p in range(n + 1): + results[p] *= math.sqrt(p + 0.5) return results else: return [] @@ -431,9 +441,9 @@ def _tabulate_derivatives(self, n, pts): # Jacobi polynomials defined on [-1, 1], first derivatives need scaling derivs *= 2.0 / self.ref_el.volume() - self._normalize(n, derivs) - vals = self._tabulate(n, pts) - return vals, derivs[:, None, :] + for p in range(n + 1): + derivs[p] *= math.sqrt(p + 0.5) + return self._tabulate(n, pts), derivs[:, None, :] class TriangleExpansionSet(ExpansionSet): @@ -444,12 +454,6 @@ def __init__(self, ref_el): raise Exception("Must have a triangle") super(TriangleExpansionSet, self).__init__(ref_el) - def _normalize(self, n, phi): - idx = 
morton_index2 - for p in range(n + 1): - for q in range(n - p + 1): - phi[idx(p, q)] *= math.sqrt((p + 0.5) * (p + q + 1.0)) - class TetrahedronExpansionSet(ExpansionSet): """Collapsed orthonormal polynomial expansion on a tetrahedron.""" @@ -458,13 +462,6 @@ def __init__(self, ref_el): raise Exception("Must be a tetrahedron") super(TetrahedronExpansionSet, self).__init__(ref_el) - def _normalize(self, n, phi): - idx = morton_index3 - for p in range(n + 1): - for q in range(n - p + 1): - for r in range(n - p - q + 1): - phi[idx(p, q, r)] *= math.sqrt((p + 0.5) * (p + q + 1.0) * (p + q + r + 1.5)) - def polynomial_dimension(ref_el, degree): """Returns the dimension of the space of polynomials of degree no From 21c429e972bf8ed7dfc74447be01eddf20aa6951 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Sun, 5 Nov 2023 15:17:35 +0000 Subject: [PATCH 42/76] cleanup --- FIAT/expansions.py | 27 +++++++-------------------- 1 file changed, 7 insertions(+), 20 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index b108c40dd..799c52f89 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -43,18 +43,15 @@ def pad_jacobian(A, embedded_dim): def recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): """Dubiner recurrence from (Kirby 2010)""" + skip_derivs = dphi is None + phi[0] = sum((ref_pts[i] - ref_pts[i] for i in range(dim)), 1.) + if not skip_derivs: + dphi[0] = ref_pts - ref_pts if dim == 0 or n == 0: return - elif dim == 1: - idx = lambda p: p - elif dim == 2: - idx = morton_index2 - elif dim == 3: - idx = morton_index3 - else: + if dim > 3 or dim < 0: raise ValueError("Invalid number of spatial dimensions") - - skip_derivs = dphi is None + idx = (lambda p: p, morton_index2, morton_index3)[dim-1] results = (phi, ) if skip_derivs else (phi, dphi) x, y, z = pad_coordinates(ref_pts, 3) f0 = 0.5 * (y + z) @@ -293,10 +290,6 @@ def _tabulate(self, n, pts): """ D = self.ref_el.get_spatial_dimension() results = [None] * self.get_num_members(n) - results[0] = sum((pts[i] - pts[i] for i in range(D)), 1.) - if n == 0: - return results - ref_pts = self._mapping(pts) recurrence(D, n, ref_pts, results) return results @@ -308,14 +301,8 @@ def _tabulate_derivatives(self, n, pts): num_members = self.get_num_members(n) phi = [None] * num_members dphi = [None] * num_members - phi[0] = sum((pts[i] - pts[i] for i in range(D)), 1.) 
- dphi[0] = pts - pts - if n == 0: - return phi, dphi - ref_pts = self._mapping(pts) - recurrence(D, n, ref_pts, phi, - jacobian=self.A, dphi=dphi) + recurrence(D, n, ref_pts, phi, jacobian=self.A, dphi=dphi) return phi, dphi def tabulate(self, n, pts): From eae8bf4e43da013951783d37e8fb38a4894df7ed Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Sun, 5 Nov 2023 17:59:04 +0000 Subject: [PATCH 43/76] Test orthogonality, more unified recurrences --- FIAT/expansions.py | 71 ++++++++++++++++++++++-------------------- test/unit/test_fiat.py | 13 ++++++++ 2 files changed, 50 insertions(+), 34 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 799c52f89..0ab33d377 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -51,6 +51,7 @@ def recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): return if dim > 3 or dim < 0: raise ValueError("Invalid number of spatial dimensions") + idx = (lambda p: p, morton_index2, morton_index3)[dim-1] results = (phi, ) if skip_derivs else (phi, dphi) x, y, z = pad_coordinates(ref_pts, 3) @@ -73,13 +74,12 @@ def recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): for p in range(1, n): iprev, icur = icur, inext inext = idx(p + 1) - a = (2. * p + 1.) / (1. + p) - b = p / (1. + p) - phi[inext] = a * f1 * phi[icur] - b * f2 * phi[iprev] + a, b, c = jrc(0, 0, p) + phi[inext] = a * f1 * phi[icur] - c * f2 * phi[iprev] if skip_derivs: continue dphi[inext] = (a * (f1 * dphi[icur] + phi[icur] * df1) - - b * (f2 * dphi[iprev] + phi[iprev] * df2)) + c * (f2 * dphi[iprev] + phi[iprev] * df2)) # normalize in p for result in results: for p in range(n + 1): @@ -87,36 +87,38 @@ def recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): if dim < 2: return - f3 = 1. + y - f4 = 0.5 * (z - 1.) + f4 = 0.5 * (1. - z) + f3 = y - f4 + 1. f5 = f4 ** 2 if jacobian is not None: - df3 = dy - df4 = 0.5 * dz + df4 = -0.5 * dz + df3 = dy - df4 df5 = 2 * f4 * df4 for p in range(n): # handle q = 1 icur = idx(p, 0) inext = idx(p, 1) - a = p + 1.5 - qcoef = a * f3 + f4 - phi[inext] = qcoef * phi[icur] + alpha = 2 * p + 1 + b = 0.5 * alpha + a = b + 1. + factor = a * f3 + b * f4 + phi[inext] = factor * phi[icur] if not skip_derivs: - dqcoef = a * df3 + df4 - dphi[inext] = qcoef * dphi[icur] + phi[icur] * dqcoef + dfactor = a * df3 + b * df4 + dphi[inext] = factor * dphi[icur] + phi[icur] * dfactor # general q by recurrence for q in range(1, n - p): iprev, icur = icur, inext inext = idx(p, q + 1) - aq, bq, cq = jrc(2 * p + 1, 0, q) - qcoef = aq * f3 + (aq - bq) * f4 - phi[inext] = qcoef * phi[icur] - cq*(f5 * phi[iprev]) + a, b, c = jrc(alpha, 0, q) + factor = a * f3 + b * f4 + phi[inext] = factor * phi[icur] - c * (f5 * phi[iprev]) if skip_derivs: continue - dqcoef = aq * df3 + (aq - bq) * df4 - dphi[inext] = (qcoef * dphi[icur] + phi[icur] * dqcoef - - cq * (f5 * dphi[iprev] + phi[iprev] * df5)) + dfactor = a * df3 + b * df4 + dphi[inext] = (factor * dphi[icur] + phi[icur] * dfactor - + c * (f5 * dphi[iprev] + phi[iprev] * df5)) # normalize in p + q for result in results: for p in range(n + 1): @@ -130,22 +132,25 @@ def recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): # handle r = 1 icur = idx(p, q, 0) inext = idx(p, q, 1) - a = 2.0 + p + q - b = 1.0 + p + q - rcoef = a * z + b - phi[inext] = rcoef * phi[icur] + alpha = 2 * (p + q) + 2 + b = 0.5 * alpha + a = b + 1. 
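
A quick illustrative check (plain Python, small ranges only) that the two parameterizations of the r = 1 coefficients used in these patches agree: the jrc-style form with alpha = 2*(p + q) + 2, b = alpha/2 and a = b + 1 reproduces the explicit a = 2.0 + p + q, b = 1.0 + p + q that it replaces.

    for p in range(8):
        for q in range(8):
            alpha = 2 * (p + q) + 2
            b_new = 0.5 * alpha
            a_new = b_new + 1.
            assert (a_new, b_new) == (2.0 + p + q, 1.0 + p + q)
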
+ factor = a * z + b + phi[inext] = factor * phi[icur] if not skip_derivs: - dphi[inext] = rcoef * dphi[icur] + a * phi[icur] * dz + dfactor = a * dz + dphi[inext] = factor * dphi[icur] + phi[icur] * dfactor # general r by recurrence for r in range(1, n - p - q): iprev, icur = icur, inext inext = idx(p, q, r + 1) - ar, br, cr = jrc(2 * p + 2 * q + 2, 0, r) - rcoef = ar * z + br - phi[inext] = rcoef * phi[icur] - cr * phi[iprev] + a, b, c = jrc(alpha, 0, r) + factor = a * z + b + phi[inext] = factor * phi[icur] - c * phi[iprev] if skip_derivs: continue - dphi[inext] = rcoef * dphi[icur] + ar * phi[icur] * dz - cr * dphi[iprev] + dfactor = a * dz + dphi[inext] = factor * dphi[icur] + phi[icur] * dfactor - c * dphi[iprev] # normalize in p + q + r for result in results: for p in range(n + 1): @@ -278,11 +283,11 @@ def get_num_members(self, n): return num_members def _mapping(self, pts): - if isinstance(pts, numpy.ndarray): + if isinstance(pts, numpy.ndarray) and len(pts.shape) == 2: return numpy.dot(self.A, pts) + self.b[:, None] else: m1, m2 = self.A.shape - return [sum((self.A[i][j] * pts[j] for j in range(m2)), self.b[i]) + return [sum((self.A[i, j] * pts[j] for j in range(m2)), self.b[i]) for i in range(m1)] def _tabulate(self, n, pts): @@ -290,8 +295,7 @@ def _tabulate(self, n, pts): """ D = self.ref_el.get_spatial_dimension() results = [None] * self.get_num_members(n) - ref_pts = self._mapping(pts) - recurrence(D, n, ref_pts, results) + recurrence(D, n, self._mapping(pts), results) return results def _tabulate_derivatives(self, n, pts): @@ -301,8 +305,7 @@ def _tabulate_derivatives(self, n, pts): num_members = self.get_num_members(n) phi = [None] * num_members dphi = [None] * num_members - ref_pts = self._mapping(pts) - recurrence(D, n, ref_pts, phi, jacobian=self.A, dphi=dphi) + recurrence(D, n, self._mapping(pts), phi, jacobian=self.A, dphi=dphi) return phi, dphi def tabulate(self, n, pts): diff --git a/test/unit/test_fiat.py b/test/unit/test_fiat.py index 05efd3776..589a5014d 100644 --- a/test/unit/test_fiat.py +++ b/test/unit/test_fiat.py @@ -517,6 +517,19 @@ def test_error_quadrature_degree(element): eval(element) +@pytest.mark.parametrize('cell', [I, T, S]) +def test_expansion_orthonormality(cell): + from FIAT import expansions, quadrature + degree = 10 + rule = quadrature.make_quadrature(cell, degree + 1) + U = expansions.ExpansionSet(cell) + phi = U.tabulate(degree, rule.pts) + w = rule.get_weights() + scale = 0.5 ** -cell.get_spatial_dimension() + results = scale * np.dot(phi, w[:, None] * phi.T) + assert np.allclose(results, np.eye(results.shape[0])) + + if __name__ == '__main__': import os pytest.main(os.path.abspath(__file__)) From 505e669cacd620ee75382931c9c6641928dd23f4 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Mon, 6 Nov 2023 10:57:18 +0000 Subject: [PATCH 44/76] even more unified recurrence --- FIAT/expansions.py | 115 ++++++++++++++++++--------------------------- 1 file changed, 46 insertions(+), 69 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 0ab33d377..5bd85d388 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -53,33 +53,47 @@ def recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): raise ValueError("Invalid number of spatial dimensions") idx = (lambda p: p, morton_index2, morton_index3)[dim-1] + + def _recurrence1(n, fixed_indices, f1, f2, f3, df1, df2, df3): + # handle i = 1 + icur = idx(*fixed_indices, 0) + inext = idx(*fixed_indices, 1) + alpha = 2 * sum(fixed_indices) + len(fixed_indices) + b = 0.5 * alpha + a 
= b + 1. + factor = a * f1 + b * f2 + phi[inext] = factor * phi[icur] + if not skip_derivs: + dfactor = a * df1 + b * df2 + dphi[inext] = factor * dphi[icur] + phi[icur] * dfactor + # general i by recurrence + for i in range(1, n - sum(fixed_indices)): + iprev, icur, inext = icur, inext, idx(*fixed_indices, i + 1) + a, b, c = jrc(alpha, 0, i) + factor = a * f1 + b * f2 + phi[inext] = factor * phi[icur] - c * (f3 * phi[iprev]) + if skip_derivs: + continue + dfactor = a * df1 + b * df2 + dphi[inext] = (factor * dphi[icur] + phi[icur] * dfactor - + c * (f3 * dphi[iprev] + phi[iprev] * df3)) + results = (phi, ) if skip_derivs else (phi, dphi) x, y, z = pad_coordinates(ref_pts, 3) - f0 = 0.5 * (y + z) - f1 = x + f0 + 1. - f2 = f0 ** 2 + + # recurruence in p + f1 = 0.5 * (y + z) + f0 = x + f1 + 1. + f2 = f1 ** 2 if jacobian is not None: dx, dy, dz = pad_jacobian(jacobian, 3) - df0 = 0.5 * (dy + dz) - df1 = dx + df0 - df2 = 2 * f0 * df0 - - # handle p = 1 - icur = idx(0) - inext = idx(1) - phi[inext] = f1 - if not skip_derivs: - dphi[inext] = 0. * dphi[icur] + df1 - # general p by recurrence - for p in range(1, n): - iprev, icur = icur, inext - inext = idx(p + 1) - a, b, c = jrc(0, 0, p) - phi[inext] = a * f1 * phi[icur] - c * f2 * phi[iprev] - if skip_derivs: - continue - dphi[inext] = (a * (f1 * dphi[icur] + phi[icur] * df1) - - c * (f2 * dphi[iprev] + phi[iprev] * df2)) + df1 = 0.5 * (dy + dz) + df0 = dx + df1 + df2 = 2 * f1 * df1 + else: + df0 = df1 = df2 = None + _recurrence1(n, tuple(), f0, f1, f2, df0, df1, df2) + # normalize in p for result in results: for p in range(n + 1): @@ -87,6 +101,7 @@ def recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): if dim < 2: return + # recurrence in q f4 = 0.5 * (1. - z) f3 = y - f4 + 1. f5 = f4 ** 2 @@ -94,31 +109,11 @@ def recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): df4 = -0.5 * dz df3 = dy - df4 df5 = 2 * f4 * df4 - + else: + df3 = df4 = df5 = None for p in range(n): - # handle q = 1 - icur = idx(p, 0) - inext = idx(p, 1) - alpha = 2 * p + 1 - b = 0.5 * alpha - a = b + 1. - factor = a * f3 + b * f4 - phi[inext] = factor * phi[icur] - if not skip_derivs: - dfactor = a * df3 + b * df4 - dphi[inext] = factor * dphi[icur] + phi[icur] * dfactor - # general q by recurrence - for q in range(1, n - p): - iprev, icur = icur, inext - inext = idx(p, q + 1) - a, b, c = jrc(alpha, 0, q) - factor = a * f3 + b * f4 - phi[inext] = factor * phi[icur] - c * (f5 * phi[iprev]) - if skip_derivs: - continue - dfactor = a * df3 + b * df4 - dphi[inext] = (factor * dphi[icur] + phi[icur] * dfactor - - c * (f5 * dphi[iprev] + phi[iprev] * df5)) + _recurrence1(n, (p,), f3, f4, f5, df3, df4, df5) + # normalize in p + q for result in results: for p in range(n + 1): @@ -127,30 +122,12 @@ def recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): if dim < 3: return + # recurrence in q + dfactors = (None,)*3 if skip_derivs else (dz, 0. * dz, 0. * dz) for p in range(n): for q in range(n - p): - # handle r = 1 - icur = idx(p, q, 0) - inext = idx(p, q, 1) - alpha = 2 * (p + q) + 2 - b = 0.5 * alpha - a = b + 1. 
- factor = a * z + b - phi[inext] = factor * phi[icur] - if not skip_derivs: - dfactor = a * dz - dphi[inext] = factor * dphi[icur] + phi[icur] * dfactor - # general r by recurrence - for r in range(1, n - p - q): - iprev, icur = icur, inext - inext = idx(p, q, r + 1) - a, b, c = jrc(alpha, 0, r) - factor = a * z + b - phi[inext] = factor * phi[icur] - c * phi[iprev] - if skip_derivs: - continue - dfactor = a * dz - dphi[inext] = factor * dphi[icur] + phi[icur] * dfactor - c * dphi[iprev] + _recurrence1(n, (p, q), z, 1., 1., *dfactors) + # normalize in p + q + r for result in results: for p in range(n + 1): From 375590a7e48f3a2a8effff3be0e27f3e0483e005 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Mon, 6 Nov 2023 13:57:50 +0000 Subject: [PATCH 45/76] unify factors --- FIAT/expansions.py | 63 +++++++++++++++++++++++----------------------- 1 file changed, 31 insertions(+), 32 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 5bd85d388..3764da6c0 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -54,45 +54,52 @@ def recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): idx = (lambda p: p, morton_index2, morton_index3)[dim-1] - def _recurrence1(n, fixed_indices, f1, f2, f3, df1, df2, df3): + def jacobi_factors(x, y, z, dx, dy, dz): + fb = 0.5 * (y + z) + fa = x + fb + 1.0 + fc = fb ** 2 + dfa = dfb = dfc = None + if dx is not None: + dfb = 0.5 * (dy + dz) + dfa = dx + dfb + dfc = 2 * fb * dfb + return fa, fb, fc, dfa, dfb, dfc + + def jacobi_recurrence(n, fixed_indices, fa, fb, fc, dfa, dfb, dfc): + """Jacobi recurrence with variable coefficients""" # handle i = 1 icur = idx(*fixed_indices, 0) inext = idx(*fixed_indices, 1) alpha = 2 * sum(fixed_indices) + len(fixed_indices) b = 0.5 * alpha - a = b + 1. - factor = a * f1 + b * f2 + a = b + 1.0 + factor = a * fa - b * fb phi[inext] = factor * phi[icur] if not skip_derivs: - dfactor = a * df1 + b * df2 + dfactor = a * dfa - b * dfb dphi[inext] = factor * dphi[icur] + phi[icur] * dfactor # general i by recurrence for i in range(1, n - sum(fixed_indices)): iprev, icur, inext = icur, inext, idx(*fixed_indices, i + 1) a, b, c = jrc(alpha, 0, i) - factor = a * f1 + b * f2 - phi[inext] = factor * phi[icur] - c * (f3 * phi[iprev]) + factor = a * fa - b * fb + phi[inext] = factor * phi[icur] - c * (fc * phi[iprev]) if skip_derivs: continue - dfactor = a * df1 + b * df2 + dfactor = a * dfa - b * dfb dphi[inext] = (factor * dphi[icur] + phi[icur] * dfactor - - c * (f3 * dphi[iprev] + phi[iprev] * df3)) + c * (fc * dphi[iprev] + phi[iprev] * dfc)) results = (phi, ) if skip_derivs else (phi, dphi) - x, y, z = pad_coordinates(ref_pts, 3) + x, y, z, w = pad_coordinates(ref_pts, 4) + if jacobian is None: + dx = dy = dz = dw = None + else: + dx, dy, dz, dw = pad_jacobian(jacobian, 4) # recurruence in p - f1 = 0.5 * (y + z) - f0 = x + f1 + 1. - f2 = f1 ** 2 - if jacobian is not None: - dx, dy, dz = pad_jacobian(jacobian, 3) - df1 = 0.5 * (dy + dz) - df0 = dx + df1 - df2 = 2 * f1 * df1 - else: - df0 = df1 = df2 = None - _recurrence1(n, tuple(), f0, f1, f2, df0, df1, df2) + factors = jacobi_factors(x, y, z, dx, dy, dz) + jacobi_recurrence(n, tuple(), *factors) # normalize in p for result in results: @@ -102,17 +109,9 @@ def _recurrence1(n, fixed_indices, f1, f2, f3, df1, df2, df3): return # recurrence in q - f4 = 0.5 * (1. - z) - f3 = y - f4 + 1. 
- f5 = f4 ** 2 - if jacobian is not None: - df4 = -0.5 * dz - df3 = dy - df4 - df5 = 2 * f4 * df4 - else: - df3 = df4 = df5 = None + factors = jacobi_factors(y, z, w, dy, dz, dw) for p in range(n): - _recurrence1(n, (p,), f3, f4, f5, df3, df4, df5) + jacobi_recurrence(n, (p,), *factors) # normalize in p + q for result in results: @@ -123,10 +122,10 @@ def _recurrence1(n, fixed_indices, f1, f2, f3, df1, df2, df3): return # recurrence in q - dfactors = (None,)*3 if skip_derivs else (dz, 0. * dz, 0. * dz) + factors = jacobi_factors(z, w, w, dz, dw, dw) for p in range(n): for q in range(n - p): - _recurrence1(n, (p, q), z, 1., 1., *dfactors) + jacobi_recurrence(n, (p, q), *factors) # normalize in p + q + r for result in results: From 1eb4774d0890fa6550b2401c5dd1c30a82fc5269 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Mon, 6 Nov 2023 15:51:56 +0000 Subject: [PATCH 46/76] test a few expansion values --- FIAT/expansions.py | 13 ++++----- test/unit/test_fiat.py | 66 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 70 insertions(+), 9 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 3764da6c0..819c9b778 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -41,7 +41,7 @@ def pad_jacobian(A, embedded_dim): return tuple(row[:, None] for row in A) -def recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): +def dubiner_recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): """Dubiner recurrence from (Kirby 2010)""" skip_derivs = dphi is None phi[0] = sum((ref_pts[i] - ref_pts[i] for i in range(dim)), 1.) @@ -252,11 +252,8 @@ def __init__(self, ref_el): self._dmats_cache = {} def get_num_members(self, n): - dim = self.ref_el.get_spatial_dimension() - num_members = 1 - for k in range(1, dim+1): - num_members = (num_members * (n + k)) // k - return num_members + D = self.ref_el.get_spatial_dimension() + return math.comb(n + D, D) def _mapping(self, pts): if isinstance(pts, numpy.ndarray) and len(pts.shape) == 2: @@ -271,7 +268,7 @@ def _tabulate(self, n, pts): """ D = self.ref_el.get_spatial_dimension() results = [None] * self.get_num_members(n) - recurrence(D, n, self._mapping(pts), results) + dubiner_recurrence(D, n, self._mapping(pts), results) return results def _tabulate_derivatives(self, n, pts): @@ -281,7 +278,7 @@ def _tabulate_derivatives(self, n, pts): num_members = self.get_num_members(n) phi = [None] * num_members dphi = [None] * num_members - recurrence(D, n, self._mapping(pts), phi, jacobian=self.A, dphi=dphi) + dubiner_recurrence(D, n, self._mapping(pts), phi, jacobian=self.A, dphi=dphi) return phi, dphi def tabulate(self, n, pts): diff --git a/test/unit/test_fiat.py b/test/unit/test_fiat.py index 589a5014d..d4e08becf 100644 --- a/test/unit/test_fiat.py +++ b/test/unit/test_fiat.py @@ -520,9 +520,9 @@ def test_error_quadrature_degree(element): @pytest.mark.parametrize('cell', [I, T, S]) def test_expansion_orthonormality(cell): from FIAT import expansions, quadrature + U = expansions.ExpansionSet(cell) degree = 10 rule = quadrature.make_quadrature(cell, degree + 1) - U = expansions.ExpansionSet(cell) phi = U.tabulate(degree, rule.pts) w = rule.get_weights() scale = 0.5 ** -cell.get_spatial_dimension() @@ -530,6 +530,70 @@ def test_expansion_orthonormality(cell): assert np.allclose(results, np.eye(results.shape[0])) +@pytest.mark.parametrize('dim', range(1, 4)) +def test_expansion_values(dim): + import sympy + from FIAT import expansions, reference_element + half = sympy.Rational(1, 2) + cell = reference_element.default_simplex(dim) + 
U = expansions.ExpansionSet(cell) + dpoints = [] + rpoints = [] + + npoints = 10 + interior = 1 + for alpha in reference_element.lattice_iter(interior, npoints+1-interior, dim): + dpoints.append(tuple(2*np.array(alpha, dtype="d")/npoints-1)) + rpoints.append(tuple(2*sympy.Rational(a, npoints)-1 for a in alpha)) + + n = 48 + eta = sympy.DeferredVector("eta") + Uvals = U.tabulate(n, dpoints) + if dim == 1: + for p in range(n + 1): + f = sympy.jacobi_poly(p, 0, 0, eta[0]) + f *= sympy.sqrt((half + p)) + vals = Uvals[p] + error = 0.0 + for pt, val in zip(rpoints, vals): + fval = f.subs(eta[0], pt[0]) + error = max(error, abs(val - float(fval))) + assert error < 1E-13 + elif dim == 2: + idx = expansions.morton_index2 + for p in range(n + 1): + q = n - p + f = (sympy.jacobi_poly(p, 0, 0, eta[0]) * + sympy.jacobi_poly(q, 2*p+1, 0, eta[1]) * ((1 - eta[1])/2) ** p) + f *= sympy.sqrt((half + p) * (1 + p + q)) + vals = Uvals[idx(p, q)] + error = 0.0 + for pt, val in zip(rpoints, vals): + eta0 = 2 * (1 + pt[0]) / (1 - pt[1]) - 1 + eta1 = pt[1] + fval = f.subs(eta[1], eta1).subs(eta[0], eta0) + error = max(error, abs(val - float(fval))) + assert error < 1E-13 + elif dim == 3: + idx = expansions.morton_index3 + for r in range(n + 1): + q = n - r + p = n - r - q + f = (sympy.jacobi_poly(p, 0, 0, eta[0]) * + sympy.jacobi_poly(q, 2*p+1, 0, eta[1]) * ((1 - eta[1])/2) ** p * + sympy.jacobi_poly(r, 2*p+2*q+2, 0, eta[2]) * ((1 - eta[2])/2) ** (p+q)) + f *= sympy.sqrt((half + p) * (1 + p + q) * (1+half + p + q + r)) + vals = Uvals[idx(p, q, r)] + error = 0.0 + for pt, val in zip(rpoints, vals): + eta0 = 2 * (1 + pt[0]) / (-pt[1] - pt[2]) - 1 + eta1 = 2 * (1 + pt[1]) / (1 - pt[2]) - 1 + eta2 = pt[2] + fval = f.subs(eta[2], eta2).subs(eta[1], eta1).subs(eta[0], eta0) + error = max(error, abs(val - float(fval))) + assert error < 1E-13 + + if __name__ == '__main__': import os pytest.main(os.path.abspath(__file__)) From dc98d1579389be28d428d44bc68c49621673e94a Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Mon, 6 Nov 2023 18:44:18 +0000 Subject: [PATCH 47/76] fully unified recurrences --- FIAT/expansions.py | 130 ++++++++++++++++++--------------------------- 1 file changed, 52 insertions(+), 78 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 819c9b778..96f0ed35a 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -41,6 +41,18 @@ def pad_jacobian(A, embedded_dim): return tuple(row[:, None] for row in A) +def jacobi_factors(x, y, z, dx, dy, dz): + fb = 0.5 * (y + z) + fa = x + fb + 1.0 + fc = fb ** 2 + dfa = dfb = dfc = None + if dx is not None: + dfb = 0.5 * (dy + dz) + dfa = dx + dfb + dfc = 2 * fb * dfb + return fa, fb, fc, dfa, dfb, dfc + + def dubiner_recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): """Dubiner recurrence from (Kirby 2010)""" skip_derivs = dphi is None @@ -53,86 +65,48 @@ def dubiner_recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): raise ValueError("Invalid number of spatial dimensions") idx = (lambda p: p, morton_index2, morton_index3)[dim-1] - - def jacobi_factors(x, y, z, dx, dy, dz): - fb = 0.5 * (y + z) - fa = x + fb + 1.0 - fc = fb ** 2 - dfa = dfb = dfc = None - if dx is not None: - dfb = 0.5 * (dy + dz) - dfa = dx + dfb - dfc = 2 * fb * dfb - return fa, fb, fc, dfa, dfb, dfc - - def jacobi_recurrence(n, fixed_indices, fa, fb, fc, dfa, dfb, dfc): - """Jacobi recurrence with variable coefficients""" - # handle i = 1 - icur = idx(*fixed_indices, 0) - inext = idx(*fixed_indices, 1) - alpha = 2 * sum(fixed_indices) + len(fixed_indices) - 
b = 0.5 * alpha - a = b + 1.0 - factor = a * fa - b * fb - phi[inext] = factor * phi[icur] - if not skip_derivs: - dfactor = a * dfa - b * dfb - dphi[inext] = factor * dphi[icur] + phi[icur] * dfactor - # general i by recurrence - for i in range(1, n - sum(fixed_indices)): - iprev, icur, inext = icur, inext, idx(*fixed_indices, i + 1) - a, b, c = jrc(alpha, 0, i) + pad_dim = dim + 2 + X = pad_coordinates(ref_pts, pad_dim) + if jacobian is None: + dX = (None,) * pad_dim + else: + dX = pad_jacobian(jacobian, pad_dim) + + for codim in range(dim): + # Extend the basis from codim to codim + 1 + fa, fb, fc, dfa, dfb, dfc = jacobi_factors(*X[codim:codim+3], *dX[codim:codim+3]) + # Get indices of low-dimensional basis + alphas = [tuple()] if codim == 0 else reference_element.lattice_iter(0, n, codim) + for sub_index in alphas: + # handle i = 1 + icur = idx(*sub_index, 0) + inext = idx(*sub_index, 1) + alpha = 2 * sum(sub_index) + len(sub_index) + b = 0.5 * alpha + a = b + 1.0 factor = a * fa - b * fb - phi[inext] = factor * phi[icur] - c * (fc * phi[iprev]) + phi[inext] = factor * phi[icur] + if not skip_derivs: + dfactor = a * dfa - b * dfb + dphi[inext] = factor * dphi[icur] + phi[icur] * dfactor + # general i by recurrence + for i in range(1, n - sum(sub_index)): + iprev, icur, inext = icur, inext, idx(*sub_index, i + 1) + a, b, c = jrc(alpha, 0, i) + factor = a * fa - b * fb + phi[inext] = factor * phi[icur] - c * (fc * phi[iprev]) + if skip_derivs: + continue + dfactor = a * dfa - b * dfb + dphi[inext] = (factor * dphi[icur] + phi[icur] * dfactor - + c * (fc * dphi[iprev] + phi[iprev] * dfc)) + # normalize + for alpha in reference_element.lattice_iter(0, n+1, codim+1): + scale = math.sqrt(sum(alpha) + 0.5 * len(alpha)) + phi[idx(*alpha)] *= scale if skip_derivs: continue - dfactor = a * dfa - b * dfb - dphi[inext] = (factor * dphi[icur] + phi[icur] * dfactor - - c * (fc * dphi[iprev] + phi[iprev] * dfc)) - - results = (phi, ) if skip_derivs else (phi, dphi) - x, y, z, w = pad_coordinates(ref_pts, 4) - if jacobian is None: - dx = dy = dz = dw = None - else: - dx, dy, dz, dw = pad_jacobian(jacobian, 4) - - # recurruence in p - factors = jacobi_factors(x, y, z, dx, dy, dz) - jacobi_recurrence(n, tuple(), *factors) - - # normalize in p - for result in results: - for p in range(n + 1): - result[idx(p)] *= math.sqrt(p + 0.5) - if dim < 2: - return - - # recurrence in q - factors = jacobi_factors(y, z, w, dy, dz, dw) - for p in range(n): - jacobi_recurrence(n, (p,), *factors) - - # normalize in p + q - for result in results: - for p in range(n + 1): - for q in range(n - p + 1): - result[idx(p, q)] *= math.sqrt(p + q + 1.0) - if dim < 3: - return - - # recurrence in q - factors = jacobi_factors(z, w, w, dz, dw, dw) - for p in range(n): - for q in range(n - p): - jacobi_recurrence(n, (p, q), *factors) - - # normalize in p + q + r - for result in results: - for p in range(n + 1): - for q in range(n - p + 1): - for r in range(n - p - q + 1): - result[idx(p, q, r)] *= math.sqrt(p + q + r + 1.5) + dphi[idx(*alpha)] *= scale def _tabulate_dpts(tabulator, D, n, order, pts): @@ -313,7 +287,7 @@ def get_dmats(self, degree): if degree == 0: return cache.setdefault(key, numpy.zeros((self.ref_el.get_spatial_dimension(), 1, 1), "d")) - pts = reference_element.make_lattice(self.ref_el.get_vertices(), degree, family="gl") + pts = reference_element.make_lattice(self.ref_el.get_vertices(), degree, variant="gl") v, dv = self._tabulate_derivatives(degree, numpy.transpose(pts)) dv = numpy.array(dv).transpose((1, 2, 0)) 
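
Here get_dmats builds the differentiation matrices by solving against the transposed generalized Vandermonde matrix of the expansion basis tabulated on a lattice: column i of each dmat expands the derivative of basis function i back in the basis, and tabulate_jet then differentiates tabulated values with dmat.T. A standalone analogue of the same linear-algebra pattern (NumPy only, with a monomial basis and made-up equispaced points instead of FIAT's orthonormal expansion on a "gl" lattice):

    import numpy as np

    n = 4
    x = np.linspace(-1.0, 1.0, n + 1)              # any n + 1 distinct points work
    V = np.array([x**i for i in range(n + 1)])     # V[i, j] = phi_i(x_j), phi_i = x**i
    dV = np.array([i * x**(i - 1) if i > 0 else np.zeros_like(x)
                   for i in range(n + 1)])         # dV[i, j] = phi_i'(x_j)
    D = np.linalg.solve(V.T, dV.T)                 # V.T @ D = dV.T

    expected = np.zeros((n + 1, n + 1))
    for i in range(1, n + 1):
        expected[i - 1, i] = i                     # d/dx x**i = i * x**(i-1)
    assert np.allclose(D, expected)
    assert np.allclose(D.T @ V, dV)                # differentiate tabulated values
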
dmats = numpy.linalg.solve(numpy.transpose(v), dv) From 0f1a10175991e96047ab0e8ee16819fbbc6987da Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Mon, 6 Nov 2023 18:44:51 +0000 Subject: [PATCH 48/76] variants for BDM, RT, and Nedelec? --- FIAT/brezzi_douglas_fortin_marini.py | 13 ++++---- FIAT/brezzi_douglas_marini.py | 11 +++---- FIAT/check_format_variant.py | 4 +-- FIAT/gauss_lobatto_legendre.py | 2 +- FIAT/lagrange.py | 4 +-- FIAT/nedelec.py | 14 ++++---- FIAT/nedelec_second_kind.py | 8 ++--- FIAT/raviart_thomas.py | 6 ++-- FIAT/reference_element.py | 16 ++++++---- test/unit/test_fiat.py | 48 ++++++++++++++-------------- 10 files changed, 63 insertions(+), 63 deletions(-) diff --git a/FIAT/brezzi_douglas_fortin_marini.py b/FIAT/brezzi_douglas_fortin_marini.py index fb8f81bd8..9a68332f3 100644 --- a/FIAT/brezzi_douglas_fortin_marini.py +++ b/FIAT/brezzi_douglas_fortin_marini.py @@ -5,7 +5,7 @@ class BDFMDualSet(dual_set.DualSet): - def __init__(self, ref_el, degree): + def __init__(self, ref_el, degree, variant=None): # Initialize containers for map: mesh_entity -> dof number and # dual basis @@ -19,7 +19,7 @@ def __init__(self, ref_el, degree): # codimension 1 facet normals. # note this will die for degree greater than 1. for i in range(len(t[sd - 1])): - pts_cur = ref_el.make_points(sd - 1, i, sd + degree) + pts_cur = ref_el.make_points(sd - 1, i, sd + degree, variant=variant) for j in range(len(pts_cur)): pt_cur = pts_cur[j] f = functional.PointScaledNormalEvaluation(ref_el, i, pt_cur) @@ -30,7 +30,7 @@ def __init__(self, ref_el, degree): # count as internal nodes. tangent_count = 0 for i in range(len(t[sd - 1])): - pts_cur = ref_el.make_points(sd - 1, i, sd + degree - 1) + pts_cur = ref_el.make_points(sd - 1, i, sd + degree - 1, variant=variant) tangent_count += len(pts_cur) for j in range(len(pts_cur)): pt_cur = pts_cur[j] @@ -46,8 +46,7 @@ def __init__(self, ref_el, degree): cur = 0 # set codimension 1 (edges 2d, faces 3d) dof - pts_facet_0 = ref_el.make_points(sd - 1, 0, sd + degree) - pts_per_facet = len(pts_facet_0) + pts_per_facet = len(ref_el.make_points(sd - 1, 0, sd + degree)) entity_ids[sd - 1] = {} for i in range(len(t[sd - 1])): @@ -104,13 +103,13 @@ def BDFMSpace(ref_el, order): class BrezziDouglasFortinMarini(finite_element.CiarletElement): """The BDFM element""" - def __init__(self, ref_el, degree): + def __init__(self, ref_el, degree, variant=None): if degree != 2: raise Exception("BDFM_k elements only valid for k == 2") poly_set = BDFMSpace(ref_el, degree) - dual = BDFMDualSet(ref_el, degree - 1) + dual = BDFMDualSet(ref_el, degree - 1, variant=variant) formdegree = ref_el.get_spatial_dimension() - 1 super(BrezziDouglasFortinMarini, self).__init__(poly_set, dual, degree, formdegree, mapping="contravariant piola") diff --git a/FIAT/brezzi_douglas_marini.py b/FIAT/brezzi_douglas_marini.py index 233973f41..0554fbb54 100644 --- a/FIAT/brezzi_douglas_marini.py +++ b/FIAT/brezzi_douglas_marini.py @@ -47,11 +47,11 @@ def __init__(self, ref_el, degree, variant, quad_deg): l_cur = functional.FrobeniusIntegralMoment(ref_el, Q, phi_cur) nodes.append(l_cur) - elif variant == "point": + else: # Define each functional for the dual set # codimension 1 facets for i in range(len(t[sd - 1])): - pts_cur = ref_el.make_points(sd - 1, i, sd + degree) + pts_cur = ref_el.make_points(sd - 1, i, sd + degree, variant=variant) for j in range(len(pts_cur)): pt_cur = pts_cur[j] f = functional.PointScaledNormalEvaluation(ref_el, i, pt_cur) @@ -80,8 +80,7 @@ def __init__(self, ref_el, degree, 
variant, quad_deg): cur = 0 # set codimension 1 (edges 2d, faces 3d) dof - pts_facet_0 = ref_el.make_points(sd - 1, 0, sd + degree) - pts_per_facet = len(pts_facet_0) + pts_per_facet = len(ref_el.make_points(sd - 1, 0, sd + degree)) entity_ids[sd - 1] = {} for i in range(len(t[sd - 1])): @@ -106,8 +105,8 @@ class BrezziDouglasMarini(finite_element.CiarletElement): :arg k: The degree. :arg variant: optional variant specifying the types of nodes. - variant can be chosen from ["point", "integral", "integral(quadrature_degree)"] - "point" -> dofs are evaluated by point evaluation. Note that this variant has suboptimal + variant can be chosen from ["equispaced", "integral", "integral(quadrature_degree)"] + "equispaced" -> dofs are evaluated by point evaluation. Note that this variant has suboptimal convergence order in the H(div)-norm "integral" -> dofs are evaluated by quadrature rule of degree k. "integral(quadrature_degree)" -> dofs are evaluated by quadrature rule of degree quadrature_degree. You might diff --git a/FIAT/check_format_variant.py b/FIAT/check_format_variant.py index 75a54aa0f..8e07c1bb2 100644 --- a/FIAT/check_format_variant.py +++ b/FIAT/check_format_variant.py @@ -12,10 +12,10 @@ def check_format_variant(variant, degree): quad_degree = int(quad_degree) if quad_degree is not None else (degree + 1) if quad_degree < degree + 1: raise ValueError("Warning, quadrature degree should be at least %s" % (degree + 1)) - elif variant == "point": + elif variant in ["equispaced"]: quad_degree = None else: - raise ValueError('Choose either variant="point" or variant="integral"' + raise ValueError('Choose either variant="equispaced" or variant="integral"' 'or variant="integral(Quadrature degree)"') return (variant, quad_degree) diff --git a/FIAT/gauss_lobatto_legendre.py b/FIAT/gauss_lobatto_legendre.py index 51f9bf4db..aec3d089f 100644 --- a/FIAT/gauss_lobatto_legendre.py +++ b/FIAT/gauss_lobatto_legendre.py @@ -18,7 +18,7 @@ class GaussLobattoLegendre(finite_element.CiarletElement): def __init__(self, ref_el, degree): if ref_el.shape not in {LINE, TRIANGLE, TETRAHEDRON}: raise ValueError("Gauss-Lobatto-Legendre elements are only defined on simplices.") - dual = lagrange.LagrangeDualSet(ref_el, degree, family="lgl") + dual = lagrange.LagrangeDualSet(ref_el, degree, variant="gll") if ref_el.shape == LINE: points = [] for node in dual.nodes: diff --git a/FIAT/lagrange.py b/FIAT/lagrange.py index bce14380f..12816ad0a 100644 --- a/FIAT/lagrange.py +++ b/FIAT/lagrange.py @@ -14,7 +14,7 @@ class LagrangeDualSet(dual_set.DualSet): simplices of any dimension. 
Nodes are point evaluation at equispaced points.""" - def __init__(self, ref_el, degree, family=None): + def __init__(self, ref_el, degree, variant=None): entity_ids = {} nodes = [] entity_permutations = {} @@ -29,7 +29,7 @@ def __init__(self, ref_el, degree, family=None): entity_permutations[dim] = {} perms = {0: [0]} if dim == 0 else make_entity_permutations_simplex(dim, degree - dim) for entity in sorted(top[dim]): - pts_cur = ref_el.make_points(dim, entity, degree, family=family) + pts_cur = ref_el.make_points(dim, entity, degree, variant=variant) nodes_cur = [functional.PointEvaluation(ref_el, x) for x in pts_cur] nnodes_cur = len(nodes_cur) diff --git a/FIAT/nedelec.py b/FIAT/nedelec.py index e419a1eb7..f85845900 100644 --- a/FIAT/nedelec.py +++ b/FIAT/nedelec.py @@ -177,12 +177,12 @@ def __init__(self, ref_el, degree, variant, quad_deg): l_cur = functional.IntegralMoment(ref_el, Q, phi_cur, (d,), (sd,)) nodes.append(l_cur) - elif variant == "point": + else: num_edges = len(t[1]) # edge tangents for i in range(num_edges): - pts_cur = ref_el.make_points(1, i, degree + 2) + pts_cur = ref_el.make_points(1, i, degree + 2, variant=variant) for j in range(len(pts_cur)): pt_cur = pts_cur[j] f = functional.PointEdgeTangentEvaluation(ref_el, i, pt_cur) @@ -284,12 +284,12 @@ def __init__(self, ref_el, degree, variant, quad_deg): l_cur = functional.IntegralMoment(ref_el, Q, phi_cur, (d,), (sd,)) nodes.append(l_cur) - elif variant == "point": + else: num_edges = len(t[1]) for i in range(num_edges): # points to specify P_k on each edge - pts_cur = ref_el.make_points(1, i, degree + 2) + pts_cur = ref_el.make_points(1, i, degree + 2, variant=variant) for j in range(len(pts_cur)): pt_cur = pts_cur[j] f = functional.PointEdgeTangentEvaluation(ref_el, i, pt_cur) @@ -298,7 +298,7 @@ def __init__(self, ref_el, degree, variant, quad_deg): if degree > 0: # face tangents num_faces = len(t[2]) for i in range(num_faces): # loop over faces - pts_cur = ref_el.make_points(2, i, degree + 2) + pts_cur = ref_el.make_points(2, i, degree + 2, variant=variant) for j in range(len(pts_cur)): # loop over points pt_cur = pts_cur[j] for k in range(2): # loop over tangents @@ -355,8 +355,8 @@ class Nedelec(finite_element.CiarletElement): :arg k: The degree. :arg variant: optional variant specifying the types of nodes. - variant can be chosen from ["point", "integral", "integral(quadrature_degree)"] - "point" -> dofs are evaluated by point evaluation. Note that this variant has suboptimal + variant can be chosen from ["equispaced", "integral", "integral(quadrature_degree)"] + "equispaced" -> dofs are evaluated by point evaluation. Note that this variant has suboptimal convergence order in the H(curl)-norm "integral" -> dofs are evaluated by quadrature rule of degree k. "integral(quadrature_degree)" -> dofs are evaluated by quadrature rule of degree quadrature_degree. 
You might diff --git a/FIAT/nedelec_second_kind.py b/FIAT/nedelec_second_kind.py index 3cdf1a0a8..dcb9a5475 100644 --- a/FIAT/nedelec_second_kind.py +++ b/FIAT/nedelec_second_kind.py @@ -109,11 +109,11 @@ def _generate_edge_dofs(self, cell, degree, offset, variant, quad_deg): jj = Pq_at_qpts.shape[0] * e ids[e] = list(range(offset + jj, offset + jj + Pq_at_qpts.shape[0])) - elif variant == "point": + else: for edge in range(len(cell.get_topology()[1])): # Create points for evaluation of tangential components - points = cell.make_points(1, edge, degree + 2) + points = cell.make_points(1, edge, degree + 2, variant=variant) # A tangential component evaluation for each point dofs += [Tangent(cell, edge, point) for point in points] @@ -224,8 +224,8 @@ class NedelecSecondKind(CiarletElement): :arg k: The degree. :arg variant: optional variant specifying the types of nodes. - variant can be chosen from ["point", "integral", "integral(quadrature_degree)"] - "point" -> dofs are evaluated by point evaluation. Note that this variant has suboptimal + variant can be chosen from ["equispaced", "integral", "integral(quadrature_degree)"] + "equispaced" -> dofs are evaluated by point evaluation. Note that this variant has suboptimal convergence order in the H(curl)-norm "integral" -> dofs are evaluated by quadrature rule of degree k. "integral(quadrature_degree)" -> dofs are evaluated by quadrature rule of degree quadrature_degree. You might diff --git a/FIAT/raviart_thomas.py b/FIAT/raviart_thomas.py index d8233cbe2..f1c8d54b7 100644 --- a/FIAT/raviart_thomas.py +++ b/FIAT/raviart_thomas.py @@ -98,10 +98,10 @@ def __init__(self, ref_el, degree, variant, quad_deg): l_cur = functional.IntegralMoment(ref_el, Q, phi_cur, (d,), (sd,)) nodes.append(l_cur) - elif variant == "point": + else: # codimension 1 facets for i in range(len(t[sd - 1])): - pts_cur = ref_el.make_points(sd - 1, i, sd + degree) + pts_cur = ref_el.make_points(sd - 1, i, sd + degree, variant=variant) for j in range(len(pts_cur)): pt_cur = pts_cur[j] f = functional.PointScaledNormalEvaluation(ref_el, i, pt_cur) @@ -150,7 +150,7 @@ class RaviartThomas(finite_element.CiarletElement): :arg k: The degree. :arg variant: optional variant specifying the types of nodes. - variant can be chosen from ["point", "integral", "integral(quadrature_degree)"] + variant can be chosen from ["equispaced", "integral", "integral(quadrature_degree)"] "point" -> dofs are evaluated by point evaluation. Note that this variant has suboptimal convergence order in the H(div)-norm "integral" -> dofs are evaluated by quadrature rule of degree k. diff --git a/FIAT/reference_element.py b/FIAT/reference_element.py index 5684d14ed..1f8618215 100644 --- a/FIAT/reference_element.py +++ b/FIAT/reference_element.py @@ -69,7 +69,7 @@ def lattice_iter(start, finish, depth): yield jj + [ii] -def make_lattice(verts, n, interior=0, family=None): +def make_lattice(verts, n, interior=0, variant=None): """Constructs a lattice of points on the simplex defined by verts. For example, the 1:st order lattice will be just the vertices. 
The optional argument interior specifies how many points from @@ -77,9 +77,11 @@ def make_lattice(verts, n, interior=0, family=None): and interior = 0, this function will return the vertices and midpoint, but with interior = 1, it will only return the midpoint.""" - if family is None or family == "equispaced": - family = "equi" - family = _decode_family(family) + if variant is None or variant == "equispaced": + variant = "equi" + elif variant == "gll": + variant = "lgl" + family = _decode_family(variant) D = len(verts) X = numpy.array(verts) get_point = lambda alpha: tuple(numpy.dot(_recursive(D - 1, n, alpha, family), X)) @@ -406,7 +408,7 @@ def compute_face_edge_tangents(self, dim, entity_id): edge_ts.append(vert_coords[dest] - vert_coords[source]) return edge_ts - def make_points(self, dim, entity_id, order, family=None): + def make_points(self, dim, entity_id, order, variant=None): """Constructs a lattice of points on the entity_id:th facet of dimension dim. Order indicates how many points to include in each direction.""" @@ -416,9 +418,9 @@ def make_points(self, dim, entity_id, order, family=None): entity_verts = \ self.get_vertices_of_subcomplex( self.get_topology()[dim][entity_id]) - return make_lattice(entity_verts, order, 1, family=family) + return make_lattice(entity_verts, order, 1, variant=variant) elif dim == self.get_spatial_dimension(): - return make_lattice(self.get_vertices(), order, 1, family=family) + return make_lattice(self.get_vertices(), order, 1, variant=variant) else: raise ValueError("illegal dimension") diff --git a/test/unit/test_fiat.py b/test/unit/test_fiat.py index d4e08becf..8edcec530 100644 --- a/test/unit/test_fiat.py +++ b/test/unit/test_fiat.py @@ -150,12 +150,12 @@ def __init__(self, a, b): 'RaviartThomas(S, 1, variant="integral(2)")', 'RaviartThomas(S, 2, variant="integral(3)")', 'RaviartThomas(S, 3, variant="integral(4)")', - 'RaviartThomas(T, 1, variant="point")', - 'RaviartThomas(T, 2, variant="point")', - 'RaviartThomas(T, 3, variant="point")', - 'RaviartThomas(S, 1, variant="point")', - 'RaviartThomas(S, 2, variant="point")', - 'RaviartThomas(S, 3, variant="point")', + 'RaviartThomas(T, 1, variant="equispaced")', + 'RaviartThomas(T, 2, variant="equispaced")', + 'RaviartThomas(T, 3, variant="equispaced")', + 'RaviartThomas(S, 1, variant="equispaced")', + 'RaviartThomas(S, 2, variant="equispaced")', + 'RaviartThomas(S, 3, variant="equispaced")', "DiscontinuousRaviartThomas(T, 1)", "DiscontinuousRaviartThomas(T, 2)", "DiscontinuousRaviartThomas(T, 3)", @@ -180,12 +180,12 @@ def __init__(self, a, b): 'BrezziDouglasMarini(S, 1, variant="integral(2)")', 'BrezziDouglasMarini(S, 2, variant="integral(3)")', 'BrezziDouglasMarini(S, 3, variant="integral(4)")', - 'BrezziDouglasMarini(T, 1, variant="point")', - 'BrezziDouglasMarini(T, 2, variant="point")', - 'BrezziDouglasMarini(T, 3, variant="point")', - 'BrezziDouglasMarini(S, 1, variant="point")', - 'BrezziDouglasMarini(S, 2, variant="point")', - 'BrezziDouglasMarini(S, 3, variant="point")', + 'BrezziDouglasMarini(T, 1, variant="equispaced")', + 'BrezziDouglasMarini(T, 2, variant="equispaced")', + 'BrezziDouglasMarini(T, 3, variant="equispaced")', + 'BrezziDouglasMarini(S, 1, variant="equispaced")', + 'BrezziDouglasMarini(S, 2, variant="equispaced")', + 'BrezziDouglasMarini(S, 3, variant="equispaced")', "Nedelec(T, 1)", "Nedelec(T, 2)", "Nedelec(T, 3)", @@ -204,12 +204,12 @@ def __init__(self, a, b): 'Nedelec(S, 1, variant="integral(2)")', 'Nedelec(S, 2, variant="integral(3)")', 'Nedelec(S, 3, 
variant="integral(4)")', - 'Nedelec(T, 1, variant="point")', - 'Nedelec(T, 2, variant="point")', - 'Nedelec(T, 3, variant="point")', - 'Nedelec(S, 1, variant="point")', - 'Nedelec(S, 2, variant="point")', - 'Nedelec(S, 3, variant="point")', + 'Nedelec(T, 1, variant="equispaced")', + 'Nedelec(T, 2, variant="equispaced")', + 'Nedelec(T, 3, variant="equispaced")', + 'Nedelec(S, 1, variant="equispaced")', + 'Nedelec(S, 2, variant="equispaced")', + 'Nedelec(S, 3, variant="equispaced")', "NedelecSecondKind(T, 1)", "NedelecSecondKind(T, 2)", "NedelecSecondKind(T, 3)", @@ -228,12 +228,12 @@ def __init__(self, a, b): 'NedelecSecondKind(S, 1, variant="integral(2)")', 'NedelecSecondKind(S, 2, variant="integral(3)")', 'NedelecSecondKind(S, 3, variant="integral(4)")', - 'NedelecSecondKind(T, 1, variant="point")', - 'NedelecSecondKind(T, 2, variant="point")', - 'NedelecSecondKind(T, 3, variant="point")', - 'NedelecSecondKind(S, 1, variant="point")', - 'NedelecSecondKind(S, 2, variant="point")', - 'NedelecSecondKind(S, 3, variant="point")', + 'NedelecSecondKind(T, 1, variant="equispaced")', + 'NedelecSecondKind(T, 2, variant="equispaced")', + 'NedelecSecondKind(T, 3, variant="equispaced")', + 'NedelecSecondKind(S, 1, variant="equispaced")', + 'NedelecSecondKind(S, 2, variant="equispaced")', + 'NedelecSecondKind(S, 3, variant="equispaced")', "Regge(T, 0)", "Regge(T, 1)", "Regge(T, 2)", From c7d51b8a8cccb06ad20b10c57ec6b3d650a56112 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Mon, 6 Nov 2023 19:03:03 +0000 Subject: [PATCH 49/76] Extend lattice_iter to 0D --- FIAT/expansions.py | 4 +--- FIAT/gauss_legendre.py | 2 +- FIAT/reference_element.py | 9 +++------ test/unit/test_fiat.py | 6 +++--- 4 files changed, 8 insertions(+), 13 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 96f0ed35a..c8764356d 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -75,9 +75,7 @@ def dubiner_recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): for codim in range(dim): # Extend the basis from codim to codim + 1 fa, fb, fc, dfa, dfb, dfc = jacobi_factors(*X[codim:codim+3], *dX[codim:codim+3]) - # Get indices of low-dimensional basis - alphas = [tuple()] if codim == 0 else reference_element.lattice_iter(0, n, codim) - for sub_index in alphas: + for sub_index in reference_element.lattice_iter(0, n, codim): # handle i = 1 icur = idx(*sub_index, 0) inext = idx(*sub_index, 1) diff --git a/FIAT/gauss_legendre.py b/FIAT/gauss_legendre.py index 1582c5caf..2ac0f2c58 100644 --- a/FIAT/gauss_legendre.py +++ b/FIAT/gauss_legendre.py @@ -32,7 +32,7 @@ def __init__(self, ref_el, degree): entity_permutations[dim][entity] = perms # make nodes by getting points - pts = make_lattice(ref_el.get_vertices(), degree, family="gl") + pts = make_lattice(ref_el.get_vertices(), degree, variant="gl") nodes = [functional.PointEvaluation(ref_el, x) for x in pts] entity_ids[dim][0] = list(range(len(nodes))) super(GaussLegendreDualSet, self).__init__(nodes, ref_el, entity_ids, entity_permutations) diff --git a/FIAT/reference_element.py b/FIAT/reference_element.py index 1f8618215..a3a4fb376 100644 --- a/FIAT/reference_element.py +++ b/FIAT/reference_element.py @@ -57,16 +57,13 @@ def multiindex_equal(d, isum, imin=0): def lattice_iter(start, finish, depth): """Generator iterating over the depth-dimensional lattice of integers between start and (finish-1). 
This works on simplices in - 1d, 2d, 3d, and beyond""" + 0d, 1d, 2d, 3d, and beyond""" if depth == 0: - return - elif depth == 1: - for ii in range(start, finish): - yield [ii] + yield tuple() else: for ii in range(start, finish): for jj in lattice_iter(start, finish - ii, depth - 1): - yield jj + [ii] + yield jj + (ii,) def make_lattice(verts, n, interior=0, variant=None): diff --git a/test/unit/test_fiat.py b/test/unit/test_fiat.py index 8edcec530..805ad4e34 100644 --- a/test/unit/test_fiat.py +++ b/test/unit/test_fiat.py @@ -546,7 +546,7 @@ def test_expansion_values(dim): dpoints.append(tuple(2*np.array(alpha, dtype="d")/npoints-1)) rpoints.append(tuple(2*sympy.Rational(a, npoints)-1 for a in alpha)) - n = 48 + n = 20 eta = sympy.DeferredVector("eta") Uvals = U.tabulate(n, dpoints) if dim == 1: @@ -561,8 +561,8 @@ def test_expansion_values(dim): assert error < 1E-13 elif dim == 2: idx = expansions.morton_index2 - for p in range(n + 1): - q = n - p + for q in range(n + 1): + p = n - q f = (sympy.jacobi_poly(p, 0, 0, eta[0]) * sympy.jacobi_poly(q, 2*p+1, 0, eta[1]) * ((1 - eta[1])/2) ** p) f *= sympy.sqrt((half + p) * (1 + p + q)) From 79a47d0caa9daaae895d512b0c3d5266fbf44d52 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Mon, 6 Nov 2023 20:26:22 +0000 Subject: [PATCH 50/76] revert some changes --- FIAT/expansions.py | 4 +-- FIAT/gauss_legendre.py | 2 +- FIAT/gauss_lobatto_legendre.py | 2 +- FIAT/lagrange.py | 4 +-- FIAT/reference_element.py | 25 ++++++++-------- test/unit/test_fiat.py | 54 +++++++++++++++++----------------- 6 files changed, 44 insertions(+), 47 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 96f0ed35a..c8764356d 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -75,9 +75,7 @@ def dubiner_recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): for codim in range(dim): # Extend the basis from codim to codim + 1 fa, fb, fc, dfa, dfb, dfc = jacobi_factors(*X[codim:codim+3], *dX[codim:codim+3]) - # Get indices of low-dimensional basis - alphas = [tuple()] if codim == 0 else reference_element.lattice_iter(0, n, codim) - for sub_index in alphas: + for sub_index in reference_element.lattice_iter(0, n, codim): # handle i = 1 icur = idx(*sub_index, 0) inext = idx(*sub_index, 1) diff --git a/FIAT/gauss_legendre.py b/FIAT/gauss_legendre.py index 1582c5caf..2ac0f2c58 100644 --- a/FIAT/gauss_legendre.py +++ b/FIAT/gauss_legendre.py @@ -32,7 +32,7 @@ def __init__(self, ref_el, degree): entity_permutations[dim][entity] = perms # make nodes by getting points - pts = make_lattice(ref_el.get_vertices(), degree, family="gl") + pts = make_lattice(ref_el.get_vertices(), degree, variant="gl") nodes = [functional.PointEvaluation(ref_el, x) for x in pts] entity_ids[dim][0] = list(range(len(nodes))) super(GaussLegendreDualSet, self).__init__(nodes, ref_el, entity_ids, entity_permutations) diff --git a/FIAT/gauss_lobatto_legendre.py b/FIAT/gauss_lobatto_legendre.py index 51f9bf4db..aec3d089f 100644 --- a/FIAT/gauss_lobatto_legendre.py +++ b/FIAT/gauss_lobatto_legendre.py @@ -18,7 +18,7 @@ class GaussLobattoLegendre(finite_element.CiarletElement): def __init__(self, ref_el, degree): if ref_el.shape not in {LINE, TRIANGLE, TETRAHEDRON}: raise ValueError("Gauss-Lobatto-Legendre elements are only defined on simplices.") - dual = lagrange.LagrangeDualSet(ref_el, degree, family="lgl") + dual = lagrange.LagrangeDualSet(ref_el, degree, variant="gll") if ref_el.shape == LINE: points = [] for node in dual.nodes: diff --git a/FIAT/lagrange.py b/FIAT/lagrange.py index 
bce14380f..12816ad0a 100644 --- a/FIAT/lagrange.py +++ b/FIAT/lagrange.py @@ -14,7 +14,7 @@ class LagrangeDualSet(dual_set.DualSet): simplices of any dimension. Nodes are point evaluation at equispaced points.""" - def __init__(self, ref_el, degree, family=None): + def __init__(self, ref_el, degree, variant=None): entity_ids = {} nodes = [] entity_permutations = {} @@ -29,7 +29,7 @@ def __init__(self, ref_el, degree, family=None): entity_permutations[dim] = {} perms = {0: [0]} if dim == 0 else make_entity_permutations_simplex(dim, degree - dim) for entity in sorted(top[dim]): - pts_cur = ref_el.make_points(dim, entity, degree, family=family) + pts_cur = ref_el.make_points(dim, entity, degree, variant=variant) nodes_cur = [functional.PointEvaluation(ref_el, x) for x in pts_cur] nnodes_cur = len(nodes_cur) diff --git a/FIAT/reference_element.py b/FIAT/reference_element.py index 5684d14ed..a3a4fb376 100644 --- a/FIAT/reference_element.py +++ b/FIAT/reference_element.py @@ -57,19 +57,16 @@ def multiindex_equal(d, isum, imin=0): def lattice_iter(start, finish, depth): """Generator iterating over the depth-dimensional lattice of integers between start and (finish-1). This works on simplices in - 1d, 2d, 3d, and beyond""" + 0d, 1d, 2d, 3d, and beyond""" if depth == 0: - return - elif depth == 1: - for ii in range(start, finish): - yield [ii] + yield tuple() else: for ii in range(start, finish): for jj in lattice_iter(start, finish - ii, depth - 1): - yield jj + [ii] + yield jj + (ii,) -def make_lattice(verts, n, interior=0, family=None): +def make_lattice(verts, n, interior=0, variant=None): """Constructs a lattice of points on the simplex defined by verts. For example, the 1:st order lattice will be just the vertices. The optional argument interior specifies how many points from @@ -77,9 +74,11 @@ def make_lattice(verts, n, interior=0, family=None): and interior = 0, this function will return the vertices and midpoint, but with interior = 1, it will only return the midpoint.""" - if family is None or family == "equispaced": - family = "equi" - family = _decode_family(family) + if variant is None or variant == "equispaced": + variant = "equi" + elif variant == "gll": + variant = "lgl" + family = _decode_family(variant) D = len(verts) X = numpy.array(verts) get_point = lambda alpha: tuple(numpy.dot(_recursive(D - 1, n, alpha, family), X)) @@ -406,7 +405,7 @@ def compute_face_edge_tangents(self, dim, entity_id): edge_ts.append(vert_coords[dest] - vert_coords[source]) return edge_ts - def make_points(self, dim, entity_id, order, family=None): + def make_points(self, dim, entity_id, order, variant=None): """Constructs a lattice of points on the entity_id:th facet of dimension dim. 
Order indicates how many points to include in each direction.""" @@ -416,9 +415,9 @@ def make_points(self, dim, entity_id, order, family=None): entity_verts = \ self.get_vertices_of_subcomplex( self.get_topology()[dim][entity_id]) - return make_lattice(entity_verts, order, 1, family=family) + return make_lattice(entity_verts, order, 1, variant=variant) elif dim == self.get_spatial_dimension(): - return make_lattice(self.get_vertices(), order, 1, family=family) + return make_lattice(self.get_vertices(), order, 1, variant=variant) else: raise ValueError("illegal dimension") diff --git a/test/unit/test_fiat.py b/test/unit/test_fiat.py index d4e08becf..805ad4e34 100644 --- a/test/unit/test_fiat.py +++ b/test/unit/test_fiat.py @@ -150,12 +150,12 @@ def __init__(self, a, b): 'RaviartThomas(S, 1, variant="integral(2)")', 'RaviartThomas(S, 2, variant="integral(3)")', 'RaviartThomas(S, 3, variant="integral(4)")', - 'RaviartThomas(T, 1, variant="point")', - 'RaviartThomas(T, 2, variant="point")', - 'RaviartThomas(T, 3, variant="point")', - 'RaviartThomas(S, 1, variant="point")', - 'RaviartThomas(S, 2, variant="point")', - 'RaviartThomas(S, 3, variant="point")', + 'RaviartThomas(T, 1, variant="equispaced")', + 'RaviartThomas(T, 2, variant="equispaced")', + 'RaviartThomas(T, 3, variant="equispaced")', + 'RaviartThomas(S, 1, variant="equispaced")', + 'RaviartThomas(S, 2, variant="equispaced")', + 'RaviartThomas(S, 3, variant="equispaced")', "DiscontinuousRaviartThomas(T, 1)", "DiscontinuousRaviartThomas(T, 2)", "DiscontinuousRaviartThomas(T, 3)", @@ -180,12 +180,12 @@ def __init__(self, a, b): 'BrezziDouglasMarini(S, 1, variant="integral(2)")', 'BrezziDouglasMarini(S, 2, variant="integral(3)")', 'BrezziDouglasMarini(S, 3, variant="integral(4)")', - 'BrezziDouglasMarini(T, 1, variant="point")', - 'BrezziDouglasMarini(T, 2, variant="point")', - 'BrezziDouglasMarini(T, 3, variant="point")', - 'BrezziDouglasMarini(S, 1, variant="point")', - 'BrezziDouglasMarini(S, 2, variant="point")', - 'BrezziDouglasMarini(S, 3, variant="point")', + 'BrezziDouglasMarini(T, 1, variant="equispaced")', + 'BrezziDouglasMarini(T, 2, variant="equispaced")', + 'BrezziDouglasMarini(T, 3, variant="equispaced")', + 'BrezziDouglasMarini(S, 1, variant="equispaced")', + 'BrezziDouglasMarini(S, 2, variant="equispaced")', + 'BrezziDouglasMarini(S, 3, variant="equispaced")', "Nedelec(T, 1)", "Nedelec(T, 2)", "Nedelec(T, 3)", @@ -204,12 +204,12 @@ def __init__(self, a, b): 'Nedelec(S, 1, variant="integral(2)")', 'Nedelec(S, 2, variant="integral(3)")', 'Nedelec(S, 3, variant="integral(4)")', - 'Nedelec(T, 1, variant="point")', - 'Nedelec(T, 2, variant="point")', - 'Nedelec(T, 3, variant="point")', - 'Nedelec(S, 1, variant="point")', - 'Nedelec(S, 2, variant="point")', - 'Nedelec(S, 3, variant="point")', + 'Nedelec(T, 1, variant="equispaced")', + 'Nedelec(T, 2, variant="equispaced")', + 'Nedelec(T, 3, variant="equispaced")', + 'Nedelec(S, 1, variant="equispaced")', + 'Nedelec(S, 2, variant="equispaced")', + 'Nedelec(S, 3, variant="equispaced")', "NedelecSecondKind(T, 1)", "NedelecSecondKind(T, 2)", "NedelecSecondKind(T, 3)", @@ -228,12 +228,12 @@ def __init__(self, a, b): 'NedelecSecondKind(S, 1, variant="integral(2)")', 'NedelecSecondKind(S, 2, variant="integral(3)")', 'NedelecSecondKind(S, 3, variant="integral(4)")', - 'NedelecSecondKind(T, 1, variant="point")', - 'NedelecSecondKind(T, 2, variant="point")', - 'NedelecSecondKind(T, 3, variant="point")', - 'NedelecSecondKind(S, 1, variant="point")', - 'NedelecSecondKind(S, 2, 
variant="point")', - 'NedelecSecondKind(S, 3, variant="point")', + 'NedelecSecondKind(T, 1, variant="equispaced")', + 'NedelecSecondKind(T, 2, variant="equispaced")', + 'NedelecSecondKind(T, 3, variant="equispaced")', + 'NedelecSecondKind(S, 1, variant="equispaced")', + 'NedelecSecondKind(S, 2, variant="equispaced")', + 'NedelecSecondKind(S, 3, variant="equispaced")', "Regge(T, 0)", "Regge(T, 1)", "Regge(T, 2)", @@ -546,7 +546,7 @@ def test_expansion_values(dim): dpoints.append(tuple(2*np.array(alpha, dtype="d")/npoints-1)) rpoints.append(tuple(2*sympy.Rational(a, npoints)-1 for a in alpha)) - n = 48 + n = 20 eta = sympy.DeferredVector("eta") Uvals = U.tabulate(n, dpoints) if dim == 1: @@ -561,8 +561,8 @@ def test_expansion_values(dim): assert error < 1E-13 elif dim == 2: idx = expansions.morton_index2 - for p in range(n + 1): - q = n - p + for q in range(n + 1): + p = n - q f = (sympy.jacobi_poly(p, 0, 0, eta[0]) * sympy.jacobi_poly(q, 2*p+1, 0, eta[1]) * ((1 - eta[1])/2) ** p) f *= sympy.sqrt((half + p) * (1 + p + q)) From 553673c2c60e1bf5163df38c90dc1913a4600357 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Mon, 6 Nov 2023 20:33:02 +0000 Subject: [PATCH 51/76] revert tests --- test/unit/test_fiat.py | 48 +++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/test/unit/test_fiat.py b/test/unit/test_fiat.py index 805ad4e34..9959ffe32 100644 --- a/test/unit/test_fiat.py +++ b/test/unit/test_fiat.py @@ -150,12 +150,12 @@ def __init__(self, a, b): 'RaviartThomas(S, 1, variant="integral(2)")', 'RaviartThomas(S, 2, variant="integral(3)")', 'RaviartThomas(S, 3, variant="integral(4)")', - 'RaviartThomas(T, 1, variant="equispaced")', - 'RaviartThomas(T, 2, variant="equispaced")', - 'RaviartThomas(T, 3, variant="equispaced")', - 'RaviartThomas(S, 1, variant="equispaced")', - 'RaviartThomas(S, 2, variant="equispaced")', - 'RaviartThomas(S, 3, variant="equispaced")', + 'RaviartThomas(T, 1, variant="point")', + 'RaviartThomas(T, 2, variant="point")', + 'RaviartThomas(T, 3, variant="point")', + 'RaviartThomas(S, 1, variant="point")', + 'RaviartThomas(S, 2, variant="point")', + 'RaviartThomas(S, 3, variant="point")', "DiscontinuousRaviartThomas(T, 1)", "DiscontinuousRaviartThomas(T, 2)", "DiscontinuousRaviartThomas(T, 3)", @@ -180,12 +180,12 @@ def __init__(self, a, b): 'BrezziDouglasMarini(S, 1, variant="integral(2)")', 'BrezziDouglasMarini(S, 2, variant="integral(3)")', 'BrezziDouglasMarini(S, 3, variant="integral(4)")', - 'BrezziDouglasMarini(T, 1, variant="equispaced")', - 'BrezziDouglasMarini(T, 2, variant="equispaced")', - 'BrezziDouglasMarini(T, 3, variant="equispaced")', - 'BrezziDouglasMarini(S, 1, variant="equispaced")', - 'BrezziDouglasMarini(S, 2, variant="equispaced")', - 'BrezziDouglasMarini(S, 3, variant="equispaced")', + 'BrezziDouglasMarini(T, 1, variant="point")', + 'BrezziDouglasMarini(T, 2, variant="point")', + 'BrezziDouglasMarini(T, 3, variant="point")', + 'BrezziDouglasMarini(S, 1, variant="point")', + 'BrezziDouglasMarini(S, 2, variant="point")', + 'BrezziDouglasMarini(S, 3, variant="point")', "Nedelec(T, 1)", "Nedelec(T, 2)", "Nedelec(T, 3)", @@ -204,12 +204,12 @@ def __init__(self, a, b): 'Nedelec(S, 1, variant="integral(2)")', 'Nedelec(S, 2, variant="integral(3)")', 'Nedelec(S, 3, variant="integral(4)")', - 'Nedelec(T, 1, variant="equispaced")', - 'Nedelec(T, 2, variant="equispaced")', - 'Nedelec(T, 3, variant="equispaced")', - 'Nedelec(S, 1, variant="equispaced")', - 'Nedelec(S, 2, variant="equispaced")', - 'Nedelec(S, 
3, variant="equispaced")', + 'Nedelec(T, 1, variant="point")', + 'Nedelec(T, 2, variant="point")', + 'Nedelec(T, 3, variant="point")', + 'Nedelec(S, 1, variant="point")', + 'Nedelec(S, 2, variant="point")', + 'Nedelec(S, 3, variant="point")', "NedelecSecondKind(T, 1)", "NedelecSecondKind(T, 2)", "NedelecSecondKind(T, 3)", @@ -228,12 +228,12 @@ def __init__(self, a, b): 'NedelecSecondKind(S, 1, variant="integral(2)")', 'NedelecSecondKind(S, 2, variant="integral(3)")', 'NedelecSecondKind(S, 3, variant="integral(4)")', - 'NedelecSecondKind(T, 1, variant="equispaced")', - 'NedelecSecondKind(T, 2, variant="equispaced")', - 'NedelecSecondKind(T, 3, variant="equispaced")', - 'NedelecSecondKind(S, 1, variant="equispaced")', - 'NedelecSecondKind(S, 2, variant="equispaced")', - 'NedelecSecondKind(S, 3, variant="equispaced")', + 'NedelecSecondKind(T, 1, variant="point")', + 'NedelecSecondKind(T, 2, variant="point")', + 'NedelecSecondKind(T, 3, variant="point")', + 'NedelecSecondKind(S, 1, variant="point")', + 'NedelecSecondKind(S, 2, variant="point")', + 'NedelecSecondKind(S, 3, variant="point")', "Regge(T, 0)", "Regge(T, 1)", "Regge(T, 2)", From 85c505be63768278c188603dd2d22916e4b683ce Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Mon, 6 Nov 2023 21:53:46 +0000 Subject: [PATCH 52/76] remove _tabulate_dpts --- FIAT/expansions.py | 130 +++++++++++---------------------------------- 1 file changed, 30 insertions(+), 100 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index c8764356d..3229f40e6 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -9,7 +9,6 @@ import numpy import math -import sympy from FIAT import reference_element from FIAT import jacobi @@ -107,77 +106,6 @@ def dubiner_recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): dphi[idx(*alpha)] *= scale -def _tabulate_dpts(tabulator, D, n, order, pts): - X = sympy.DeferredVector('x') - - def form_derivative(F): - '''Forms the derivative recursively, i.e., - F -> [F_x, F_y, F_z], - [F_x, F_y, F_z] -> [[F_xx, F_xy, F_xz], - [F_yx, F_yy, F_yz], - [F_zx, F_zy, F_zz]] - and so forth. - ''' - out = [] - try: - out = [sympy.diff(F, X[j]) for j in range(D)] - except (AttributeError, ValueError): - # Intercept errors like - # AttributeError: 'list' object has no attribute - # 'free_symbols' - for f in F: - out.append(form_derivative(f)) - return out - - def numpy_lambdify(X, F): - '''Unfortunately, SymPy's own lambdify() doesn't work well with - NumPy in that simple functions like - lambda x: 1.0, - when evaluated with NumPy arrays, return just "1.0" instead of - an array of 1s with the same shape as x. This function does that. - ''' - try: - lambda_x = [numpy_lambdify(X, f) for f in F] - except TypeError: # 'function' object is not iterable - # SymPy's lambdify also works on functions that return arrays. - # However, use it componentwise here so we can add 0*x to each - # component individually. This is necessary to maintain shapes - # if evaluated with NumPy arrays. - lmbd_tmp = sympy.lambdify(X, F) - lambda_x = lambda x: lmbd_tmp(x) + 0 * x[0] - return lambda_x - - def evaluate_lambda(lmbd, x): - '''Properly evaluate lambda expressions recursively for iterables. 
- ''' - try: - values = [evaluate_lambda(l, x) for l in lmbd] - except TypeError: # 'function' object is not iterable - values = lmbd(x) - return values - - # Tabulate symbolically - symbolic_tab = tabulator(n, X) - # Make sure that the entries of symbolic_tab are lists so we can - # append derivatives - symbolic_tab = [[phi] for phi in symbolic_tab] - # - data = (order + 1) * [None] - for r in range(order + 1): - shape = [len(symbolic_tab), len(pts)] + r * [D] - data[r] = numpy.empty(shape) - for i, phi in enumerate(symbolic_tab): - # Evaluate the function numerically using lambda expressions - deriv_lambda = numpy_lambdify(X, phi[r]) - data[r][i] = \ - numpy.array(evaluate_lambda(deriv_lambda, pts.T)).T - # Symbolically compute the next derivative. - # This actually happens once too many here; never mind for - # now. - phi.append(form_derivative(phi[-1])) - return data - - def xi_triangle(eta): """Maps from [-1,1]^2 to the (-1,1) reference triangle.""" eta1, eta2 = eta @@ -253,24 +181,6 @@ def _tabulate_derivatives(self, n, pts): dubiner_recurrence(D, n, self._mapping(pts), phi, jacobian=self.A, dphi=dphi) return phi, dphi - def tabulate(self, n, pts): - if len(pts) == 0: - return numpy.array([]) - return numpy.array(self._tabulate(n, numpy.transpose(pts))) - - def tabulate_derivatives(self, n, pts): - D = self.ref_el.get_spatial_dimension() - vals, deriv_vals = self._tabulate_derivatives(n, numpy.transpose(pts)) - # Create the ordinary data structure. - data = [[(vals[i][j], [deriv_vals[i][r][j] for r in range(D)]) - for j in range(len(vals[0]))] - for i in range(len(vals))] - return data - - def tabulate_jet(self, n, pts, order=1): - D = self.ref_el.get_spatial_dimension() - return _tabulate_dpts(self._tabulate, D, n, order, numpy.array(pts)) - def get_dmats(self, degree): """Returns a numpy array with the expansion coefficients dmat[k, j, i] of the gradient of each member of the expansion set: @@ -326,6 +236,36 @@ def distance(alpha, beta): result[alpha] = vals return result + def tabulate(self, n, pts): + if len(pts) == 0: + return numpy.array([]) + return numpy.array(self._tabulate(n, numpy.transpose(pts))) + + def tabulate_derivatives(self, n, pts): + vals, deriv_vals = self._tabulate_derivatives(n, numpy.transpose(pts)) + # Create the ordinary data structure. + D = self.ref_el.get_spatial_dimension() + data = [[(vals[i][j], [deriv_vals[i][r][j] for r in range(D)]) + for j in range(len(vals[0]))] + for i in range(len(vals))] + return data + + def tabulate_jet(self, n, pts, order=1): + vals = self._tabulate_jet(n, pts, order=order) + # Create the ordinary data structure. + D = self.ref_el.get_spatial_dimension() + v0 = vals[(0,)*D] + data = [v0] + for r in range(1, order+1): + v = numpy.zeros((D,)*r + v0.shape, dtype=v0.dtype) + for index in zip(*[range(D) for k in range(r)]): + alpha = [0] * D + for i in index: + alpha[i] += 1 + v[index] = vals[tuple(alpha)] + data.append(v.transpose((r, r+1) + tuple(range(r)))) + return data + class PointExpansionSet(ExpansionSet): """Evaluates the point basis on a point reference element.""" @@ -339,16 +279,6 @@ def tabulate(self, n, pts): assert n == 0 return numpy.ones((1, len(pts))) - def tabulate_derivatives(self, n, pts): - """Returns a numpy array of size A where A[i,j] = phi_i(pts[j]) - but where each element is an empty tuple (). 
This maintains - compatibility with the interfaces of the interval, triangle and - tetrahedron expansions.""" - deriv_vals = numpy.empty_like(self.tabulate(n, pts), dtype=tuple) - deriv_vals.fill(()) - - return deriv_vals - class LineExpansionSet(ExpansionSet): """Evaluates the Legendre basis on a line reference element.""" From f9f88a74db2b2cf7cd7a3c74bab1ca28f3c590f6 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Mon, 6 Nov 2023 23:02:02 +0000 Subject: [PATCH 53/76] untested hessian --- FIAT/expansions.py | 35 +++++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 3229f40e6..0d307a74d 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -37,7 +37,7 @@ def pad_coordinates(ref_pts, embedded_dim): def pad_jacobian(A, embedded_dim): """Pad coordinate mapping Jacobian by appending zero rows.""" A = numpy.pad(A, [(0, embedded_dim - A.shape[0]), (0, 0)]) - return tuple(row[:, None] for row in A) + return tuple(row[..., None] for row in A) def jacobi_factors(x, y, z, dx, dy, dz): @@ -52,12 +52,14 @@ def jacobi_factors(x, y, z, dx, dy, dz): return fa, fb, fc, dfa, dfb, dfc -def dubiner_recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): +def dubiner_recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None, ddphi=None): """Dubiner recurrence from (Kirby 2010)""" - skip_derivs = dphi is None + outer = lambda x, y: x * y[..., None] phi[0] = sum((ref_pts[i] - ref_pts[i] for i in range(dim)), 1.) - if not skip_derivs: + if dphi is not None: dphi[0] = ref_pts - ref_pts + if ddphi is not None: + ddphi[0] = outer(dphi[0], dphi[0]) if dim == 0 or n == 0: return if dim > 3 or dim < 0: @@ -83,27 +85,37 @@ def dubiner_recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None): a = b + 1.0 factor = a * fa - b * fb phi[inext] = factor * phi[icur] - if not skip_derivs: + if dphi is not None: dfactor = a * dfa - b * dfb dphi[inext] = factor * dphi[icur] + phi[icur] * dfactor + if ddphi is not None: + ddphi[inext] = factor * ddphi[icur] + 2 * outer(dphi[icur], dfactor) + ddfc = 2 * outer(dfb, dfb) + # general i by recurrence for i in range(1, n - sum(sub_index)): iprev, icur, inext = icur, inext, idx(*sub_index, i + 1) a, b, c = jrc(alpha, 0, i) factor = a * fa - b * fb phi[inext] = factor * phi[icur] - c * (fc * phi[iprev]) - if skip_derivs: + if dphi is None: continue dfactor = a * dfa - b * dfb dphi[inext] = (factor * dphi[icur] + phi[icur] * dfactor - c * (fc * dphi[iprev] + phi[iprev] * dfc)) + if ddphi is None: + continue + ddphi[inext] = (factor * ddphi[icur] + 2 * outer(dphi[icur], dfactor) - + c * (fc * ddphi[iprev] + 2 * outer(dphi[iprev], dfc) + phi[iprev] * ddfc)) + # normalize for alpha in reference_element.lattice_iter(0, n+1, codim+1): scale = math.sqrt(sum(alpha) + 0.5 * len(alpha)) phi[idx(*alpha)] *= scale - if skip_derivs: - continue - dphi[idx(*alpha)] *= scale + if dphi is not None: + dphi[idx(*alpha)] *= scale + if ddphi is not None: + ddphi[idx(*alpha)] *= scale def xi_triangle(eta): @@ -175,9 +187,8 @@ def _tabulate_derivatives(self, n, pts): """A version of tabulate_derivatives() that also works for a single point. 
""" D = self.ref_el.get_spatial_dimension() - num_members = self.get_num_members(n) - phi = [None] * num_members - dphi = [None] * num_members + phi = [None] * self.get_num_members(n) + dphi = [None] * len(phi) dubiner_recurrence(D, n, self._mapping(pts), phi, jacobian=self.A, dphi=dphi) return phi, dphi From 890adb78a41f05227fe79c7449e4de6b634219cb Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Mon, 6 Nov 2023 23:44:34 +0000 Subject: [PATCH 54/76] better reindexing --- FIAT/expansions.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 0d307a74d..fb7ea5930 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -270,10 +270,7 @@ def tabulate_jet(self, n, pts, order=1): for r in range(1, order+1): v = numpy.zeros((D,)*r + v0.shape, dtype=v0.dtype) for index in zip(*[range(D) for k in range(r)]): - alpha = [0] * D - for i in index: - alpha[i] += 1 - v[index] = vals[tuple(alpha)] + v[index] = vals[tuple(map(index.count, range(D)))] data.append(v.transpose((r, r+1) + tuple(range(r)))) return data From 34f622ccd43ea0d69dbeba483d9eebc2fe00d3dd Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Tue, 7 Nov 2023 07:09:16 +0000 Subject: [PATCH 55/76] fix tabulate_jet --- FIAT/expansions.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index fb7ea5930..e520df13a 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -9,6 +9,7 @@ import numpy import math +from itertools import product from FIAT import reference_element from FIAT import jacobi @@ -269,7 +270,7 @@ def tabulate_jet(self, n, pts, order=1): data = [v0] for r in range(1, order+1): v = numpy.zeros((D,)*r + v0.shape, dtype=v0.dtype) - for index in zip(*[range(D) for k in range(r)]): + for index in product(range(D), repeat=r): v[index] = vals[tuple(map(index.count, range(D)))] data.append(v.transpose((r, r+1) + tuple(range(r)))) return data From 8a86dbe3d27d3848617513cd15dd387f8aa286e4 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Tue, 7 Nov 2023 07:30:39 +0000 Subject: [PATCH 56/76] do not append empty tuples --- FIAT/reference_element.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/FIAT/reference_element.py b/FIAT/reference_element.py index a3a4fb376..5c0137684 100644 --- a/FIAT/reference_element.py +++ b/FIAT/reference_element.py @@ -60,6 +60,9 @@ def lattice_iter(start, finish, depth): 0d, 1d, 2d, 3d, and beyond""" if depth == 0: yield tuple() + elif depth == 1: + for ii in range(start, finish): + yield (ii,) else: for ii in range(start, finish): for jj in lattice_iter(start, finish - ii, depth - 1): From 35d527b38e5b3ff544ce0b07db62987f7ce62a15 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Tue, 7 Nov 2023 07:31:40 +0000 Subject: [PATCH 57/76] flake --- FIAT/reference_element.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/FIAT/reference_element.py b/FIAT/reference_element.py index 5c0137684..66225a674 100644 --- a/FIAT/reference_element.py +++ b/FIAT/reference_element.py @@ -61,8 +61,8 @@ def lattice_iter(start, finish, depth): if depth == 0: yield tuple() elif depth == 1: - for ii in range(start, finish): - yield (ii,) + for ii in range(start, finish): + yield (ii,) else: for ii in range(start, finish): for jj in lattice_iter(start, finish - ii, depth - 1): From 28afe6a40d5ed83a2d592a115ff176b63a572068 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Tue, 7 Nov 2023 17:43:10 +0000 Subject: [PATCH 58/76] fix some indices --- FIAT/polynomial_set.py | 17 
+++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/FIAT/polynomial_set.py b/FIAT/polynomial_set.py index 2fb247f12..829e8cc93 100644 --- a/FIAT/polynomial_set.py +++ b/FIAT/polynomial_set.py @@ -133,7 +133,7 @@ def __init__(self, ref_el, degree, shape=tuple()): expansion_set = expansions.ExpansionSet(ref_el) # set up coefficients - coeffs_shape = tuple([num_members] + list(shape) + [num_exp_functions]) + coeffs_shape = (num_members,) + shape + (num_exp_functions,) coeffs = numpy.zeros(coeffs_shape, "d") # use functional's index_iterator function @@ -145,7 +145,7 @@ def __init__(self, ref_el, degree, shape=tuple()): for idx in index_iterator(shape): n = expansions.polynomial_dimension(ref_el, embedded_degree) for exp_bf in range(n): - cur_idx = tuple([cur_bf] + list(idx) + [exp_bf]) + cur_idx = (cur_bf,) + idx + (exp_bf,) coeffs[cur_idx] = 1.0 cur_bf += 1 @@ -198,7 +198,7 @@ def polynomial_set_union_normalized(A, B): (u, sig, vt) = numpy.linalg.svd(nc, 1) num_sv = len([s for s in sig if abs(s) > 1.e-10]) - coeffs = numpy.reshape(vt[:num_sv], tuple([num_sv] + list(func_shape))) + coeffs = numpy.reshape(vt[:num_sv], (num_sv,) + func_shape) return PolynomialSet(A.get_reference_element(), A.get_degree(), @@ -227,22 +227,19 @@ def __init__(self, ref_el, degree, size=None): expansion_set = expansions.ExpansionSet(ref_el) # set up coefficients for symmetric tensors - coeffs_shape = tuple([num_members] + list(shape) + [num_exp_functions]) + coeffs_shape = (num_members,) + shape + (num_exp_functions,) coeffs = numpy.zeros(coeffs_shape, "d") cur_bf = 0 for [i, j] in index_iterator(shape): n = expansions.polynomial_dimension(ref_el, embedded_degree) if i == j: for exp_bf in range(n): - cur_idx = tuple([cur_bf] + [i, j] + [exp_bf]) - coeffs[cur_idx] = 1.0 + coeffs[cur_bf, i, j, exp_bf] = 1.0 cur_bf += 1 elif i < j: for exp_bf in range(n): - cur_idx = tuple([cur_bf] + [i, j] + [exp_bf]) - coeffs[cur_idx] = 1.0 - cur_idx = tuple([cur_bf] + [j, i] + [exp_bf]) - coeffs[cur_idx] = 1.0 + coeffs[cur_bf, i, j, exp_bf] = 1.0 + coeffs[cur_bf, j, i, exp_bf] = 1.0 cur_bf += 1 # construct dmats. this is the same as ONPolynomialSet. 
From 127b26f9b06201b73e1f973248ecb302454fa163 Mon Sep 17 00:00:00 2001 From: Rob Kirby Date: Tue, 7 Nov 2023 15:07:43 -0600 Subject: [PATCH 59/76] Add a test to catch np.linalg.solve silently producing a wrong answer --- FIAT/finite_element.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/FIAT/finite_element.py b/FIAT/finite_element.py index d763f31ee..d4f666a8e 100644 --- a/FIAT/finite_element.py +++ b/FIAT/finite_element.py @@ -10,9 +10,9 @@ import numpy +from FIAT.dual_set import DualSet from FIAT.polynomial_set import PolynomialSet from FIAT.quadrature_schemes import create_quadrature -from FIAT.dual_set import DualSet class FiniteElement(object): @@ -137,6 +137,9 @@ def __init__(self, poly_set, dual, order, formdegree=None, mapping="affine", ref self.V = V new_coeffs_flat = numpy.linalg.solve(numpy.transpose(V), B) + resid = numpy.linalg.norm(numpy.dot(numpy.transpose(V), new_coeffs_flat) - B, 1) + if resid > 1.e-10: + raise numpy.linalg.LinAlgError("nontrivial residual in linear system solution") new_shp = new_coeffs_flat.shape[:1] + shp[1:] new_coeffs = numpy.reshape(new_coeffs_flat, new_shp) From 8b0382b3a61b50ffbeb7f455d8839a81d41e6959 Mon Sep 17 00:00:00 2001 From: Rob Kirby Date: Tue, 7 Nov 2023 15:12:41 -0600 Subject: [PATCH 60/76] update tolerance so rest of tests pass --- FIAT/finite_element.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/FIAT/finite_element.py b/FIAT/finite_element.py index d4f666a8e..58220dd73 100644 --- a/FIAT/finite_element.py +++ b/FIAT/finite_element.py @@ -138,7 +138,8 @@ def __init__(self, poly_set, dual, order, formdegree=None, mapping="affine", ref new_coeffs_flat = numpy.linalg.solve(numpy.transpose(V), B) resid = numpy.linalg.norm(numpy.dot(numpy.transpose(V), new_coeffs_flat) - B, 1) - if resid > 1.e-10: + if resid > 1.e-7: + print("residual: ", resid) raise numpy.linalg.LinAlgError("nontrivial residual in linear system solution") new_shp = new_coeffs_flat.shape[:1] + shp[1:] From 2b78af768c07cbb959423e0cfa6820758fbd4b38 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Tue, 7 Nov 2023 22:51:14 +0000 Subject: [PATCH 61/76] QR on dualmat makes illposed tests throw a singular matrix error, while improving high order accuracy --- FIAT/finite_element.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/FIAT/finite_element.py b/FIAT/finite_element.py index 58220dd73..520a1c86a 100644 --- a/FIAT/finite_element.py +++ b/FIAT/finite_element.py @@ -135,12 +135,16 @@ def __init__(self, poly_set, dual, order, formdegree=None, mapping="affine", ref V = numpy.dot(A, numpy.transpose(B)) self.V = V + # new_coeffs_flat = numpy.linalg.solve(numpy.transpose(V), B) - new_coeffs_flat = numpy.linalg.solve(numpy.transpose(V), B) - resid = numpy.linalg.norm(numpy.dot(numpy.transpose(V), new_coeffs_flat) - B, 1) - if resid > 1.e-7: - print("residual: ", resid) - raise numpy.linalg.LinAlgError("nontrivial residual in linear system solution") + QA, RA = numpy.linalg.qr(A) + VT = numpy.dot(B, numpy.transpose(RA)) + X = numpy.linalg.solve(VT, B) + new_coeffs_flat = numpy.dot(QA, X) + + relres = numpy.linalg.norm(numpy.dot(VT, X) - B, "fro") / numpy.linalg.norm(B, "fro") + if relres > 1.E-7: + raise numpy.linalg.LinAlgError("Nontrivial residual in linear system solution") new_shp = new_coeffs_flat.shape[:1] + shp[1:] new_coeffs = numpy.reshape(new_coeffs_flat, new_shp) From 86a80b0c6c1c6d6cbb42b7f7e67aa2c82a9d981c Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Wed, 8 Nov 2023 15:16:01 +0000 
Subject: [PATCH 62/76] cleanup --- FIAT/finite_element.py | 26 +++++++------------------- 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/FIAT/finite_element.py b/FIAT/finite_element.py index 520a1c86a..af7d2f056 100644 --- a/FIAT/finite_element.py +++ b/FIAT/finite_element.py @@ -124,30 +124,18 @@ def __init__(self, poly_set, dual, order, formdegree=None, mapping="affine", ref dualmat = dual.to_riesz(poly_set) shp = dualmat.shape - if len(shp) > 2: - num_cols = numpy.prod(shp[1:]) - - A = numpy.reshape(dualmat, (dualmat.shape[0], num_cols)) - B = numpy.reshape(old_coeffs, (old_coeffs.shape[0], num_cols)) - else: - A = dualmat - B = old_coeffs - + A = dualmat.reshape((shp[0], -1)) + B = old_coeffs.reshape((shp[0], -1)) V = numpy.dot(A, numpy.transpose(B)) self.V = V - # new_coeffs_flat = numpy.linalg.solve(numpy.transpose(V), B) - - QA, RA = numpy.linalg.qr(A) - VT = numpy.dot(B, numpy.transpose(RA)) - X = numpy.linalg.solve(VT, B) - new_coeffs_flat = numpy.dot(QA, X) - relres = numpy.linalg.norm(numpy.dot(VT, X) - B, "fro") / numpy.linalg.norm(B, "fro") - if relres > 1.E-7: - raise numpy.linalg.LinAlgError("Nontrivial residual in linear system solution") + # new_coeffs_flat = numpy.linalg.solve(V.T, B) + Q, R = numpy.linalg.qr(A) + BR = numpy.dot(B, numpy.transpose(R)) + new_coeffs_flat = numpy.dot(Q, numpy.linalg.solve(BR, B)) new_shp = new_coeffs_flat.shape[:1] + shp[1:] - new_coeffs = numpy.reshape(new_coeffs_flat, new_shp) + new_coeffs = new_coeffs_flat.reshape(new_shp) self.poly_set = PolynomialSet(ref_el, poly_set.get_degree(), From 4453505d7cc779fb9b045b43c202990dbd6f70a7 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Wed, 8 Nov 2023 16:33:48 +0000 Subject: [PATCH 63/76] cleanup test_expansion_values --- FIAT/finite_element.py | 4 +- test/unit/test_fiat.py | 91 +++++++++++++++----------------- test/unit/test_gauss_legendre.py | 4 +- 3 files changed, 48 insertions(+), 51 deletions(-) diff --git a/FIAT/finite_element.py b/FIAT/finite_element.py index af7d2f056..32381a473 100644 --- a/FIAT/finite_element.py +++ b/FIAT/finite_element.py @@ -131,8 +131,8 @@ def __init__(self, poly_set, dual, order, formdegree=None, mapping="affine", ref # new_coeffs_flat = numpy.linalg.solve(V.T, B) Q, R = numpy.linalg.qr(A) - BR = numpy.dot(B, numpy.transpose(R)) - new_coeffs_flat = numpy.dot(Q, numpy.linalg.solve(BR, B)) + VTQ = numpy.dot(B, numpy.transpose(R)) + new_coeffs_flat = numpy.dot(Q, numpy.linalg.solve(VTQ, B)) new_shp = new_coeffs_flat.shape[:1] + shp[1:] new_coeffs = new_coeffs_flat.reshape(new_shp) diff --git a/test/unit/test_fiat.py b/test/unit/test_fiat.py index 805ad4e34..733571527 100644 --- a/test/unit/test_fiat.py +++ b/test/unit/test_fiat.py @@ -533,65 +533,62 @@ def test_expansion_orthonormality(cell): @pytest.mark.parametrize('dim', range(1, 4)) def test_expansion_values(dim): import sympy - from FIAT import expansions, reference_element - half = sympy.Rational(1, 2) + from FIAT import expansions, polynomial_set, reference_element cell = reference_element.default_simplex(dim) U = expansions.ExpansionSet(cell) dpoints = [] rpoints = [] - npoints = 10 + npoints = 4 interior = 1 for alpha in reference_element.lattice_iter(interior, npoints+1-interior, dim): dpoints.append(tuple(2*np.array(alpha, dtype="d")/npoints-1)) rpoints.append(tuple(2*sympy.Rational(a, npoints)-1 for a in alpha)) n = 20 - eta = sympy.DeferredVector("eta") Uvals = U.tabulate(n, dpoints) - if dim == 1: - for p in range(n + 1): - f = sympy.jacobi_poly(p, 0, 0, eta[0]) - f *= sympy.sqrt((half + 
p)) - vals = Uvals[p] - error = 0.0 - for pt, val in zip(rpoints, vals): - fval = f.subs(eta[0], pt[0]) - error = max(error, abs(val - float(fval))) - assert error < 1E-13 - elif dim == 2: - idx = expansions.morton_index2 - for q in range(n + 1): - p = n - q - f = (sympy.jacobi_poly(p, 0, 0, eta[0]) * - sympy.jacobi_poly(q, 2*p+1, 0, eta[1]) * ((1 - eta[1])/2) ** p) - f *= sympy.sqrt((half + p) * (1 + p + q)) - vals = Uvals[idx(p, q)] - error = 0.0 - for pt, val in zip(rpoints, vals): - eta0 = 2 * (1 + pt[0]) / (1 - pt[1]) - 1 - eta1 = pt[1] - fval = f.subs(eta[1], eta1).subs(eta[0], eta0) - error = max(error, abs(val - float(fval))) - assert error < 1E-13 - elif dim == 3: - idx = expansions.morton_index3 - for r in range(n + 1): - q = n - r - p = n - r - q - f = (sympy.jacobi_poly(p, 0, 0, eta[0]) * - sympy.jacobi_poly(q, 2*p+1, 0, eta[1]) * ((1 - eta[1])/2) ** p * - sympy.jacobi_poly(r, 2*p+2*q+2, 0, eta[2]) * ((1 - eta[2])/2) ** (p+q)) - f *= sympy.sqrt((half + p) * (1 + p + q) * (1+half + p + q + r)) - vals = Uvals[idx(p, q, r)] - error = 0.0 - for pt, val in zip(rpoints, vals): - eta0 = 2 * (1 + pt[0]) / (-pt[1] - pt[2]) - 1 - eta1 = 2 * (1 + pt[1]) / (1 - pt[2]) - 1 - eta2 = pt[2] - fval = f.subs(eta[2], eta2).subs(eta[1], eta1).subs(eta[0], eta0) - error = max(error, abs(val - float(fval))) - assert error < 1E-13 + idx = (lambda p: p, expansions.morton_index2, expansions.morton_index3)[dim-1] + eta = sympy.DeferredVector("eta") + half = sympy.Rational(1, 2) + + def duffy_coords(pt): + if len(pt) == 1: + return pt + elif len(pt) == 2: + eta0 = 2 * (1 + pt[0]) / (1 - pt[1]) - 1 + eta1 = pt[1] + return eta0, eta1 + else: + eta0 = 2 * (1 + pt[0]) / (-pt[1] - pt[2]) - 1 + eta1 = 2 * (1 + pt[1]) / (1 - pt[2]) - 1 + eta2 = pt[2] + return eta0, eta1, eta2 + + def basis(dim, p, q=0, r=0): + if dim >= 1: + f = sympy.jacobi(p, 0, 0, eta[0]) + f *= sympy.sqrt(half + p) + if dim >= 2: + f *= sympy.jacobi(q, 2*p+1, 0, eta[1]) * ((1 - eta[1])/2) ** p + f *= sympy.sqrt(1 + p + q) + if dim >= 3: + f *= sympy.jacobi(r, 2*p+2*q+2, 0, eta[2]) * ((1 - eta[2])/2) ** (p+q) + f *= sympy.sqrt(1 + half + p + q + r) + return f + + def eval_basis(f, pt): + fval = f + for coord, pval in zip(eta, duffy_coords(pt)): + fval = fval.subs(coord, pval) + fval = float(fval) + return fval + + for i in range(n + 1): + for indices in polynomial_set.mis(dim, i): + phi = basis(dim, *indices) + exact = np.array([eval_basis(phi, r) for r in rpoints]) + uh = Uvals[idx(*indices)] + assert np.allclose(uh, exact, atol=1E-14) if __name__ == '__main__': diff --git a/test/unit/test_gauss_legendre.py b/test/unit/test_gauss_legendre.py index ef39f75fb..dc7a64481 100644 --- a/test/unit/test_gauss_legendre.py +++ b/test/unit/test_gauss_legendre.py @@ -85,7 +85,7 @@ def test_symmetry(dim, degree): @pytest.mark.parametrize("dim, degree", [(1, 128), (2, 64), (3, 16)]) def test_interpolation(dim, degree): - from FIAT import GaussLobattoLegendre, quadrature + from FIAT import GaussLegendre, quadrature from FIAT.polynomial_set import mis a = (1. 
+ 0.5) @@ -113,7 +113,7 @@ def test_interpolation(dim, degree): k = 1 while k <= degree: - fe = GaussLobattoLegendre(s, k) + fe = GaussLegendre(s, k) tab = fe.tabulate(1, points) coefficients = np.array([v(f) for v in fe.dual_basis()]) From d7f8f0415fe9d84b208ff81544fbe1065794d1da Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Wed, 8 Nov 2023 17:21:46 +0000 Subject: [PATCH 64/76] Assert that errors decrease exponentially --- test/unit/test_gauss_legendre.py | 13 +++++++------ test/unit/test_gauss_lobatto_legendre.py | 13 +++++++------ 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/test/unit/test_gauss_legendre.py b/test/unit/test_gauss_legendre.py index dc7a64481..3eb147eb2 100644 --- a/test/unit/test_gauss_legendre.py +++ b/test/unit/test_gauss_legendre.py @@ -57,8 +57,8 @@ def test_gl_basis_values(dim, degree): @pytest.mark.parametrize("dim, degree", [(1, 4), (2, 4), (3, 4)]) -def test_symmetry(dim, degree): - """ Ensure the dual basis has the right symmetry.""" +def test_edge_dofs(dim, degree): + """ Ensure edge DOFs are point evaluations at GL points.""" from FIAT import GaussLegendre, quadrature, expansions s = symmetric_simplex(dim) @@ -83,12 +83,12 @@ def test_symmetry(dim, degree): assert np.allclose(points[edge_dofs[entity]], np.array(list(map(transform, quadrature_points)))) -@pytest.mark.parametrize("dim, degree", [(1, 128), (2, 64), (3, 16)]) +@pytest.mark.parametrize("dim, degree", [(1, 128), (2, 32), (3, 16)]) def test_interpolation(dim, degree): from FIAT import GaussLegendre, quadrature from FIAT.polynomial_set import mis - a = (1. + 0.5) + a = 1. + 0.5 a = 0.5 * a**2 r2 = lambda x: 0.5 * np.linalg.norm(x, axis=-1)**2 f = lambda x: np.exp(a / (r2(x) - a)) @@ -107,7 +107,6 @@ def test_interpolation(dim, degree): i = next(j for j, aj in enumerate(alpha) if aj > 0) f_at_pts[alpha] = df_at_pts[i] - print() scaleL2 = 1 / np.sqrt(np.dot(weights, f(points)**2)) scaleH1 = 1 / np.sqrt(np.dot(weights, sum(f_at_pts[alpha]**2 for alpha in f_at_pts))) @@ -123,7 +122,9 @@ def test_interpolation(dim, degree): err2 = sum((f_at_pts[alpha] - np.dot(coefficients, tab[alpha])) ** 2 for alpha in tab) errorH1 = scaleH1 * np.sqrt(np.dot(weights, err2)) - print("dim = %d, degree = %2d, L2-error = %.4E, H1-error = %.4E" % (dim, k, errorL2, errorH1)) + + assert errorL2 < 2 * max(3*np.exp(-k), 1E-15) + assert errorH1 < 2 * max(3*np.exp(-k+1), 1E-13 if dim == 1 else 1E-11) k = min(k * 2, k + 16) diff --git a/test/unit/test_gauss_lobatto_legendre.py b/test/unit/test_gauss_lobatto_legendre.py index 932934963..0641d0975 100644 --- a/test/unit/test_gauss_lobatto_legendre.py +++ b/test/unit/test_gauss_lobatto_legendre.py @@ -57,8 +57,8 @@ def test_gll_basis_values(dim, degree): @pytest.mark.parametrize("dim, degree", [(1, 4), (2, 4), (3, 4)]) -def test_symmetry(dim, degree): - """ Ensure the dual basis has the right symmetry.""" +def test_edge_dofs(dim, degree): + """ Ensure edge DOFs are point evaluations at GL points.""" from FIAT import GaussLobattoLegendre, quadrature, expansions s = symmetric_simplex(dim) @@ -84,12 +84,12 @@ def test_symmetry(dim, degree): assert np.allclose(points[edge_dofs[entity]], np.array(list(map(transform, quadrature_points)))) -@pytest.mark.parametrize("dim, degree", [(1, 128), (2, 64), (3, 16)]) +@pytest.mark.parametrize("dim, degree", [(1, 128), (2, 32), (3, 16)]) def test_interpolation(dim, degree): from FIAT import GaussLobattoLegendre, quadrature from FIAT.polynomial_set import mis - a = (1. + 0.5) + a = 1. 
+ 0.5 a = 0.5 * a**2 r2 = lambda x: 0.5 * np.linalg.norm(x, axis=-1)**2 f = lambda x: np.exp(a / (r2(x) - a)) @@ -108,7 +108,6 @@ def test_interpolation(dim, degree): i = next(j for j, aj in enumerate(alpha) if aj > 0) f_at_pts[alpha] = df_at_pts[i] - print() scaleL2 = 1 / np.sqrt(np.dot(weights, f(points)**2)) scaleH1 = 1 / np.sqrt(np.dot(weights, sum(f_at_pts[alpha]**2 for alpha in f_at_pts))) @@ -124,7 +123,9 @@ def test_interpolation(dim, degree): err2 = sum((f_at_pts[alpha] - np.dot(coefficients, tab[alpha])) ** 2 for alpha in tab) errorH1 = scaleH1 * np.sqrt(np.dot(weights, err2)) - print("dim = %d, degree = %2d, L2-error = %.4E, H1-error = %.4E" % (dim, k, errorL2, errorH1)) + + assert errorL2 < max(3*np.exp(-k), 1E-15) + assert errorH1 < max(3*np.exp(-k+1), 1E-13 if dim == 1 else 1E-11) k = min(k * 2, k + 16) From 51e91ab2a4195bb77250938d73381fc87e605962 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Wed, 8 Nov 2023 19:46:07 +0000 Subject: [PATCH 65/76] cleanup, fuse _tabulate and _tabulate_derivatives --- FIAT/barycentric_interpolation.py | 15 +++-- FIAT/expansions.py | 107 ++++++++++++++---------------- 2 files changed, 59 insertions(+), 63 deletions(-) diff --git a/FIAT/barycentric_interpolation.py b/FIAT/barycentric_interpolation.py index 48f5ae3a0..faae36954 100644 --- a/FIAT/barycentric_interpolation.py +++ b/FIAT/barycentric_interpolation.py @@ -53,12 +53,15 @@ def tabulate(self, n, pts): results = numpy.array(list(map(simplify, results))) return results - def _tabulate_derivatives(self, n, pts): - results = self.tabulate(n, pts) - return results, numpy.dot(self.dmat, results)[:, None, :] - - def tabulate_derivatives(self, n, pts): - return numpy.dot(self.dmat, self.tabulate(n, pts)) + def _tabulate(self, n, pts, order=0): + results = [self.tabulate(n, pts)] + for r in range(order): + results.append(numpy.dot(self.dmat, results[-1])) + for r in range(order+1): + shape = results[r].shape + shape = shape[:1] + (1,)*r + shape[1:] + results[r] = numpy.reshape(results[r], shape) + return results class LagrangePolynomialSet(polynomial_set.PolynomialSet): diff --git a/FIAT/expansions.py b/FIAT/expansions.py index e520df13a..e12ba0cc1 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -53,26 +53,30 @@ def jacobi_factors(x, y, z, dx, dy, dz): return fa, fb, fc, dfa, dfb, dfc -def dubiner_recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None, ddphi=None): +def dubiner_recurrence(dim, n, order, ref_pts, jacobian): """Dubiner recurrence from (Kirby 2010)""" - outer = lambda x, y: x * y[..., None] + if order > 2: + raise ValueError("Higher order derivatives not supported") + + num_members = math.comb(n + dim, dim) + results = tuple([None] * num_members for i in range(order+1)) + phi, dphi, ddphi = results + (None,) * (2-order) + phi[0] = sum((ref_pts[i] - ref_pts[i] for i in range(dim)), 1.) if dphi is not None: dphi[0] = ref_pts - ref_pts if ddphi is not None: - ddphi[0] = outer(dphi[0], dphi[0]) + ddphi[0] = numpy.zeros((dim,) + ref_pts.shape, ref_pts.dtype) if dim == 0 or n == 0: - return + return results if dim > 3 or dim < 0: raise ValueError("Invalid number of spatial dimensions") - idx = (lambda p: p, morton_index2, morton_index3)[dim-1] pad_dim = dim + 2 X = pad_coordinates(ref_pts, pad_dim) - if jacobian is None: - dX = (None,) * pad_dim - else: - dX = pad_jacobian(jacobian, pad_dim) + dX = pad_jacobian(jacobian, pad_dim) + idx = (lambda p: p, morton_index2, morton_index3)[dim-1] + outer = lambda x, y: x * y[None, ...] 
for codim in range(dim): # Extend the basis from codim to codim + 1 @@ -117,6 +121,7 @@ def dubiner_recurrence(dim, n, ref_pts, phi, jacobian=None, dphi=None, ddphi=Non dphi[idx(*alpha)] *= scale if ddphi is not None: ddphi[idx(*alpha)] *= scale + return results def xi_triangle(eta): @@ -176,22 +181,11 @@ def _mapping(self, pts): return [sum((self.A[i, j] * pts[j] for j in range(m2)), self.b[i]) for i in range(m1)] - def _tabulate(self, n, pts): + def _tabulate(self, n, pts, order=0): """A version of tabulate() that also works for a single point. """ D = self.ref_el.get_spatial_dimension() - results = [None] * self.get_num_members(n) - dubiner_recurrence(D, n, self._mapping(pts), results) - return results - - def _tabulate_derivatives(self, n, pts): - """A version of tabulate_derivatives() that also works for a single point. - """ - D = self.ref_el.get_spatial_dimension() - phi = [None] * self.get_num_members(n) - dphi = [None] * len(phi) - dubiner_recurrence(D, n, self._mapping(pts), phi, jacobian=self.A, dphi=dphi) - return phi, dphi + return dubiner_recurrence(D, n, order, self._mapping(pts), self.A) def get_dmats(self, degree): """Returns a numpy array with the expansion coefficients dmat[k, j, i] @@ -208,7 +202,7 @@ def get_dmats(self, degree): return cache.setdefault(key, numpy.zeros((self.ref_el.get_spatial_dimension(), 1, 1), "d")) pts = reference_element.make_lattice(self.ref_el.get_vertices(), degree, variant="gl") - v, dv = self._tabulate_derivatives(degree, numpy.transpose(pts)) + v, dv = self._tabulate(degree, numpy.transpose(pts), order=1) dv = numpy.array(dv).transpose((1, 2, 0)) dmats = numpy.linalg.solve(numpy.transpose(v), dv) return cache.setdefault(key, dmats) @@ -217,22 +211,25 @@ def _tabulate_jet(self, degree, pts, order=0): from FIAT.polynomial_set import mis result = {} D = self.ref_el.get_spatial_dimension() - if order == 0: - base_vals = self.tabulate(degree, pts) - else: - v, dv = self._tabulate_derivatives(degree, numpy.transpose(pts)) - base_vals = numpy.array(v) - dtildes = numpy.array(dv).transpose((1, 0, 2)) - alphas = mis(D, 1) - for alpha in alphas: - result[alpha] = next(dv for dv, ai in zip(dtildes, alpha) if ai > 0) + lorder = min(1, order) + vals = self._tabulate(degree, numpy.transpose(pts), order=lorder) + base_vals = numpy.array(vals[0]) + base_alpha = (0,) * D + result[base_alpha] = base_vals + for r in range(1, 1+lorder): + vr = numpy.transpose(vals[r], tuple(range(1, r+1)) + (0, r+1)) + for indices in product(range(D), repeat=r): + alpha = tuple(map(indices.count, range(D))) + if alpha not in result: + result[alpha] = vr[indices] + if order == lorder: + return result def distance(alpha, beta): return sum(ai != bi for ai, bi in zip(alpha, beta)) - # Only use dmats if order > 1 - dmats = self.get_dmats(degree) if order > 1 else [] - base_alpha = (0,) * D + # Only use dmats if order > lorder + dmats = self.get_dmats(degree) for i in range(order + 1): alphas = mis(D, i) for alpha in alphas: @@ -251,10 +248,11 @@ def distance(alpha, beta): def tabulate(self, n, pts): if len(pts) == 0: return numpy.array([]) - return numpy.array(self._tabulate(n, numpy.transpose(pts))) + results, = self._tabulate(n, numpy.transpose(pts)) + return numpy.array(results) def tabulate_derivatives(self, n, pts): - vals, deriv_vals = self._tabulate_derivatives(n, numpy.transpose(pts)) + vals, deriv_vals = self._tabulate(n, numpy.transpose(pts), order=1) # Create the ordinary data structure. 
D = self.ref_el.get_spatial_dimension() data = [[(vals[i][j], [deriv_vals[i][r][j] for r in range(D)]) @@ -296,28 +294,23 @@ def __init__(self, ref_el): raise Exception("Must have a line") super(LineExpansionSet, self).__init__(ref_el) - def _tabulate(self, n, pts): - """Returns a numpy array A[i,j] = phi_i(pts[j])""" - if len(pts) > 0: - ref_pts = self._mapping(pts).T - results = jacobi.eval_jacobi_batch(0, 0, n, ref_pts) - for p in range(n + 1): - results[p] *= math.sqrt(p + 0.5) - return results - else: - return [] - - def _tabulate_derivatives(self, n, pts): + def _tabulate(self, n, pts, order=0): """Returns a tuple of (vals, derivs) such that vals[i,j] = phi_i(pts[j]), derivs[i,j] = D vals[i,j].""" - ref_pts = self._mapping(pts).T - derivs = jacobi.eval_jacobi_deriv_batch(0, 0, n, ref_pts) - - # Jacobi polynomials defined on [-1, 1], first derivatives need scaling - derivs *= 2.0 / self.ref_el.volume() - for p in range(n + 1): - derivs[p] *= math.sqrt(p + 0.5) - return self._tabulate(n, pts), derivs[:, None, :] + xs = self._mapping(pts).T + results = [] + scale = numpy.sqrt(0.5 + numpy.arange(n+1)) + for k in range(order+1): + v = numpy.zeros((n + 1, len(xs)), xs.dtype) + if n >= k: + v[k:] = jacobi.eval_jacobi_batch(k, k, n-k, xs) + for p in range(n + 1): + v[p] *= scale[p] + scale[p] *= 0.5 * (p + k + 1) * self.A[0, 0] + shape = v.shape + shape = shape[:1] + (1,) * k + shape[1:] + results.append(v.reshape(shape)) + return tuple(results) class TriangleExpansionSet(ExpansionSet): From dc2ea6c2948cd809d7dcd0cc1ac408469a50c10c Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Wed, 8 Nov 2023 23:16:09 +0000 Subject: [PATCH 66/76] Second derivative recurrence is working properly --- FIAT/expansions.py | 47 ++++++++++++++++++++-------------------------- 1 file changed, 20 insertions(+), 27 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index e12ba0cc1..2c4bca996 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -62,21 +62,22 @@ def dubiner_recurrence(dim, n, order, ref_pts, jacobian): results = tuple([None] * num_members for i in range(order+1)) phi, dphi, ddphi = results + (None,) * (2-order) + outer = lambda x, y: x[:, None, ...] * y[None, ...] + sym_outer = lambda x, y: outer(x, y) + outer(y, x) + pad_dim = dim + 2 + dX = pad_jacobian(jacobian, pad_dim) phi[0] = sum((ref_pts[i] - ref_pts[i] for i in range(dim)), 1.) if dphi is not None: - dphi[0] = ref_pts - ref_pts + dphi[0] = (phi[0] - phi[0]) * dX[0] if ddphi is not None: - ddphi[0] = numpy.zeros((dim,) + ref_pts.shape, ref_pts.dtype) + ddphi[0] = outer(dphi[0], dX[0]) if dim == 0 or n == 0: return results if dim > 3 or dim < 0: raise ValueError("Invalid number of spatial dimensions") - pad_dim = dim + 2 X = pad_coordinates(ref_pts, pad_dim) - dX = pad_jacobian(jacobian, pad_dim) idx = (lambda p: p, morton_index2, morton_index3)[dim-1] - outer = lambda x, y: x * y[None, ...] 
for codim in range(dim): # Extend the basis from codim to codim + 1 @@ -94,7 +95,7 @@ def dubiner_recurrence(dim, n, order, ref_pts, jacobian): dfactor = a * dfa - b * dfb dphi[inext] = factor * dphi[icur] + phi[icur] * dfactor if ddphi is not None: - ddphi[inext] = factor * ddphi[icur] + 2 * outer(dphi[icur], dfactor) + ddphi[inext] = factor * ddphi[icur] + sym_outer(dphi[icur], dfactor) ddfc = 2 * outer(dfb, dfb) # general i by recurrence @@ -110,17 +111,15 @@ def dubiner_recurrence(dim, n, order, ref_pts, jacobian): c * (fc * dphi[iprev] + phi[iprev] * dfc)) if ddphi is None: continue - ddphi[inext] = (factor * ddphi[icur] + 2 * outer(dphi[icur], dfactor) - - c * (fc * ddphi[iprev] + 2 * outer(dphi[iprev], dfc) + phi[iprev] * ddfc)) + ddphi[inext] = (factor * ddphi[icur] + sym_outer(dphi[icur], dfactor) - + c * (fc * ddphi[iprev] + sym_outer(dphi[iprev], dfc) + phi[iprev] * ddfc)) # normalize for alpha in reference_element.lattice_iter(0, n+1, codim+1): + icur = idx(*alpha) scale = math.sqrt(sum(alpha) + 0.5 * len(alpha)) - phi[idx(*alpha)] *= scale - if dphi is not None: - dphi[idx(*alpha)] *= scale - if ddphi is not None: - ddphi[idx(*alpha)] *= scale + for result in results: + result[icur] *= scale return results @@ -174,7 +173,7 @@ def get_num_members(self, n): return math.comb(n + D, D) def _mapping(self, pts): - if isinstance(pts, numpy.ndarray) and len(pts.shape) == 2: + if isinstance(pts, numpy.ndarray) and len(pts.shape) == 2 and pts.dtype != object: return numpy.dot(self.A, pts) + self.b[:, None] else: m1, m2 = self.A.shape @@ -203,7 +202,7 @@ def get_dmats(self, degree): pts = reference_element.make_lattice(self.ref_el.get_vertices(), degree, variant="gl") v, dv = self._tabulate(degree, numpy.transpose(pts), order=1) - dv = numpy.array(dv).transpose((1, 2, 0)) + dv = numpy.transpose(dv, (1, 2, 0)) dmats = numpy.linalg.solve(numpy.transpose(v), dv) return cache.setdefault(key, dmats) @@ -211,7 +210,7 @@ def _tabulate_jet(self, degree, pts, order=0): from FIAT.polynomial_set import mis result = {} D = self.ref_el.get_spatial_dimension() - lorder = min(1, order) + lorder = min(2, order) vals = self._tabulate(degree, numpy.transpose(pts), order=lorder) base_vals = numpy.array(vals[0]) base_alpha = (0,) * D @@ -221,24 +220,18 @@ def _tabulate_jet(self, degree, pts, order=0): for indices in product(range(D), repeat=r): alpha = tuple(map(indices.count, range(D))) if alpha not in result: - result[alpha] = vr[indices] - if order == lorder: - return result + result[alpha] = vr[indices].reshape(base_vals.shape) def distance(alpha, beta): return sum(ai != bi for ai, bi in zip(alpha, beta)) # Only use dmats if order > lorder - dmats = self.get_dmats(degree) - for i in range(order + 1): + for i in range(lorder + 1, order + 1): + dmats = self.get_dmats(degree) alphas = mis(D, i) for alpha in alphas: - if alpha in result: - continue - if len(result) > 0 and i > 0: - base_alpha = next(a for a in result if sum(a) == i-1 and distance(alpha, a) == 1) - base_vals = result[base_alpha] - vals = base_vals + base_alpha = next(a for a in result if sum(a) == i-1 and distance(alpha, a) == 1) + vals = result[base_alpha] for dmat, start, end in zip(dmats, base_alpha, alpha): for j in range(start, end): vals = numpy.dot(dmat.T, vals) From 5e4cb017cbaf8f5e3ee0f2bc3fc14fc6c0b1dd42 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Wed, 8 Nov 2023 23:19:14 +0000 Subject: [PATCH 67/76] flake --- FIAT/expansions.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/FIAT/expansions.py 
b/FIAT/expansions.py index 2c4bca996..51bf88fab 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -62,8 +62,9 @@ def dubiner_recurrence(dim, n, order, ref_pts, jacobian): results = tuple([None] * num_members for i in range(order+1)) phi, dphi, ddphi = results + (None,) * (2-order) - outer = lambda x, y: x[:, None, ...] * y[None, ...] + outer = lambda x, y: x[:, None, ...] * y[None, ...] sym_outer = lambda x, y: outer(x, y) + outer(y, x) + pad_dim = dim + 2 dX = pad_jacobian(jacobian, pad_dim) phi[0] = sum((ref_pts[i] - ref_pts[i] for i in range(dim)), 1.) From 5b8f4e40962f497f497e8c3533043f5bbf21ed08 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Thu, 9 Nov 2023 06:20:04 +0000 Subject: [PATCH 68/76] restore point variant --- FIAT/brezzi_douglas_fortin_marini.py | 13 ++++---- FIAT/brezzi_douglas_marini.py | 11 ++++--- FIAT/check_format_variant.py | 4 +-- FIAT/nedelec.py | 14 ++++---- FIAT/nedelec_second_kind.py | 8 ++--- FIAT/raviart_thomas.py | 6 ++-- test/unit/test_fiat.py | 48 ++++++++++++++-------------- 7 files changed, 53 insertions(+), 51 deletions(-) diff --git a/FIAT/brezzi_douglas_fortin_marini.py b/FIAT/brezzi_douglas_fortin_marini.py index 9a68332f3..fb8f81bd8 100644 --- a/FIAT/brezzi_douglas_fortin_marini.py +++ b/FIAT/brezzi_douglas_fortin_marini.py @@ -5,7 +5,7 @@ class BDFMDualSet(dual_set.DualSet): - def __init__(self, ref_el, degree, variant=None): + def __init__(self, ref_el, degree): # Initialize containers for map: mesh_entity -> dof number and # dual basis @@ -19,7 +19,7 @@ def __init__(self, ref_el, degree, variant=None): # codimension 1 facet normals. # note this will die for degree greater than 1. for i in range(len(t[sd - 1])): - pts_cur = ref_el.make_points(sd - 1, i, sd + degree, variant=variant) + pts_cur = ref_el.make_points(sd - 1, i, sd + degree) for j in range(len(pts_cur)): pt_cur = pts_cur[j] f = functional.PointScaledNormalEvaluation(ref_el, i, pt_cur) @@ -30,7 +30,7 @@ def __init__(self, ref_el, degree, variant=None): # count as internal nodes. 
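# Aside: the outer/sym_outer helpers introduced in dubiner_recurrence above
# assemble Hessian contributions by broadcasting over the trailing point axis.
# A plain numpy illustration, with array sizes chosen here:
import numpy as np

outer = lambda x, y: x[:, None, ...] * y[None, ...]
sym_outer = lambda x, y: outer(x, y) + outer(y, x)

dim, npts = 3, 5
dphi = np.random.rand(dim, npts)     # gradient of one basis function at npts points
dfactor = np.random.rand(dim, npts)  # gradient of the recurrence factor
H = sym_outer(dphi, dfactor)
assert H.shape == (dim, dim, npts)
# symmetric in the two derivative axes, as a Hessian term must be
assert np.allclose(H, np.transpose(H, (1, 0, 2)))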
tangent_count = 0 for i in range(len(t[sd - 1])): - pts_cur = ref_el.make_points(sd - 1, i, sd + degree - 1, variant=variant) + pts_cur = ref_el.make_points(sd - 1, i, sd + degree - 1) tangent_count += len(pts_cur) for j in range(len(pts_cur)): pt_cur = pts_cur[j] @@ -46,7 +46,8 @@ def __init__(self, ref_el, degree, variant=None): cur = 0 # set codimension 1 (edges 2d, faces 3d) dof - pts_per_facet = len(ref_el.make_points(sd - 1, 0, sd + degree)) + pts_facet_0 = ref_el.make_points(sd - 1, 0, sd + degree) + pts_per_facet = len(pts_facet_0) entity_ids[sd - 1] = {} for i in range(len(t[sd - 1])): @@ -103,13 +104,13 @@ def BDFMSpace(ref_el, order): class BrezziDouglasFortinMarini(finite_element.CiarletElement): """The BDFM element""" - def __init__(self, ref_el, degree, variant=None): + def __init__(self, ref_el, degree): if degree != 2: raise Exception("BDFM_k elements only valid for k == 2") poly_set = BDFMSpace(ref_el, degree) - dual = BDFMDualSet(ref_el, degree - 1, variant=variant) + dual = BDFMDualSet(ref_el, degree - 1) formdegree = ref_el.get_spatial_dimension() - 1 super(BrezziDouglasFortinMarini, self).__init__(poly_set, dual, degree, formdegree, mapping="contravariant piola") diff --git a/FIAT/brezzi_douglas_marini.py b/FIAT/brezzi_douglas_marini.py index 0554fbb54..233973f41 100644 --- a/FIAT/brezzi_douglas_marini.py +++ b/FIAT/brezzi_douglas_marini.py @@ -47,11 +47,11 @@ def __init__(self, ref_el, degree, variant, quad_deg): l_cur = functional.FrobeniusIntegralMoment(ref_el, Q, phi_cur) nodes.append(l_cur) - else: + elif variant == "point": # Define each functional for the dual set # codimension 1 facets for i in range(len(t[sd - 1])): - pts_cur = ref_el.make_points(sd - 1, i, sd + degree, variant=variant) + pts_cur = ref_el.make_points(sd - 1, i, sd + degree) for j in range(len(pts_cur)): pt_cur = pts_cur[j] f = functional.PointScaledNormalEvaluation(ref_el, i, pt_cur) @@ -80,7 +80,8 @@ def __init__(self, ref_el, degree, variant, quad_deg): cur = 0 # set codimension 1 (edges 2d, faces 3d) dof - pts_per_facet = len(ref_el.make_points(sd - 1, 0, sd + degree)) + pts_facet_0 = ref_el.make_points(sd - 1, 0, sd + degree) + pts_per_facet = len(pts_facet_0) entity_ids[sd - 1] = {} for i in range(len(t[sd - 1])): @@ -105,8 +106,8 @@ class BrezziDouglasMarini(finite_element.CiarletElement): :arg k: The degree. :arg variant: optional variant specifying the types of nodes. - variant can be chosen from ["equispaced", "integral", "integral(quadrature_degree)"] - "equispaced" -> dofs are evaluated by point evaluation. Note that this variant has suboptimal + variant can be chosen from ["point", "integral", "integral(quadrature_degree)"] + "point" -> dofs are evaluated by point evaluation. Note that this variant has suboptimal convergence order in the H(div)-norm "integral" -> dofs are evaluated by quadrature rule of degree k. "integral(quadrature_degree)" -> dofs are evaluated by quadrature rule of degree quadrature_degree. 
You might diff --git a/FIAT/check_format_variant.py b/FIAT/check_format_variant.py index 8e07c1bb2..75a54aa0f 100644 --- a/FIAT/check_format_variant.py +++ b/FIAT/check_format_variant.py @@ -12,10 +12,10 @@ def check_format_variant(variant, degree): quad_degree = int(quad_degree) if quad_degree is not None else (degree + 1) if quad_degree < degree + 1: raise ValueError("Warning, quadrature degree should be at least %s" % (degree + 1)) - elif variant in ["equispaced"]: + elif variant == "point": quad_degree = None else: - raise ValueError('Choose either variant="equispaced" or variant="integral"' + raise ValueError('Choose either variant="point" or variant="integral"' 'or variant="integral(Quadrature degree)"') return (variant, quad_degree) diff --git a/FIAT/nedelec.py b/FIAT/nedelec.py index f85845900..e419a1eb7 100644 --- a/FIAT/nedelec.py +++ b/FIAT/nedelec.py @@ -177,12 +177,12 @@ def __init__(self, ref_el, degree, variant, quad_deg): l_cur = functional.IntegralMoment(ref_el, Q, phi_cur, (d,), (sd,)) nodes.append(l_cur) - else: + elif variant == "point": num_edges = len(t[1]) # edge tangents for i in range(num_edges): - pts_cur = ref_el.make_points(1, i, degree + 2, variant=variant) + pts_cur = ref_el.make_points(1, i, degree + 2) for j in range(len(pts_cur)): pt_cur = pts_cur[j] f = functional.PointEdgeTangentEvaluation(ref_el, i, pt_cur) @@ -284,12 +284,12 @@ def __init__(self, ref_el, degree, variant, quad_deg): l_cur = functional.IntegralMoment(ref_el, Q, phi_cur, (d,), (sd,)) nodes.append(l_cur) - else: + elif variant == "point": num_edges = len(t[1]) for i in range(num_edges): # points to specify P_k on each edge - pts_cur = ref_el.make_points(1, i, degree + 2, variant=variant) + pts_cur = ref_el.make_points(1, i, degree + 2) for j in range(len(pts_cur)): pt_cur = pts_cur[j] f = functional.PointEdgeTangentEvaluation(ref_el, i, pt_cur) @@ -298,7 +298,7 @@ def __init__(self, ref_el, degree, variant, quad_deg): if degree > 0: # face tangents num_faces = len(t[2]) for i in range(num_faces): # loop over faces - pts_cur = ref_el.make_points(2, i, degree + 2, variant=variant) + pts_cur = ref_el.make_points(2, i, degree + 2) for j in range(len(pts_cur)): # loop over points pt_cur = pts_cur[j] for k in range(2): # loop over tangents @@ -355,8 +355,8 @@ class Nedelec(finite_element.CiarletElement): :arg k: The degree. :arg variant: optional variant specifying the types of nodes. - variant can be chosen from ["equispaced", "integral", "integral(quadrature_degree)"] - "equispaced" -> dofs are evaluated by point evaluation. Note that this variant has suboptimal + variant can be chosen from ["point", "integral", "integral(quadrature_degree)"] + "point" -> dofs are evaluated by point evaluation. Note that this variant has suboptimal convergence order in the H(curl)-norm "integral" -> dofs are evaluated by quadrature rule of degree k. "integral(quadrature_degree)" -> dofs are evaluated by quadrature rule of degree quadrature_degree. 
You might diff --git a/FIAT/nedelec_second_kind.py b/FIAT/nedelec_second_kind.py index dcb9a5475..3cdf1a0a8 100644 --- a/FIAT/nedelec_second_kind.py +++ b/FIAT/nedelec_second_kind.py @@ -109,11 +109,11 @@ def _generate_edge_dofs(self, cell, degree, offset, variant, quad_deg): jj = Pq_at_qpts.shape[0] * e ids[e] = list(range(offset + jj, offset + jj + Pq_at_qpts.shape[0])) - else: + elif variant == "point": for edge in range(len(cell.get_topology()[1])): # Create points for evaluation of tangential components - points = cell.make_points(1, edge, degree + 2, variant=variant) + points = cell.make_points(1, edge, degree + 2) # A tangential component evaluation for each point dofs += [Tangent(cell, edge, point) for point in points] @@ -224,8 +224,8 @@ class NedelecSecondKind(CiarletElement): :arg k: The degree. :arg variant: optional variant specifying the types of nodes. - variant can be chosen from ["equispaced", "integral", "integral(quadrature_degree)"] - "equispaced" -> dofs are evaluated by point evaluation. Note that this variant has suboptimal + variant can be chosen from ["point", "integral", "integral(quadrature_degree)"] + "point" -> dofs are evaluated by point evaluation. Note that this variant has suboptimal convergence order in the H(curl)-norm "integral" -> dofs are evaluated by quadrature rule of degree k. "integral(quadrature_degree)" -> dofs are evaluated by quadrature rule of degree quadrature_degree. You might diff --git a/FIAT/raviart_thomas.py b/FIAT/raviart_thomas.py index f1c8d54b7..d8233cbe2 100644 --- a/FIAT/raviart_thomas.py +++ b/FIAT/raviart_thomas.py @@ -98,10 +98,10 @@ def __init__(self, ref_el, degree, variant, quad_deg): l_cur = functional.IntegralMoment(ref_el, Q, phi_cur, (d,), (sd,)) nodes.append(l_cur) - else: + elif variant == "point": # codimension 1 facets for i in range(len(t[sd - 1])): - pts_cur = ref_el.make_points(sd - 1, i, sd + degree, variant=variant) + pts_cur = ref_el.make_points(sd - 1, i, sd + degree) for j in range(len(pts_cur)): pt_cur = pts_cur[j] f = functional.PointScaledNormalEvaluation(ref_el, i, pt_cur) @@ -150,7 +150,7 @@ class RaviartThomas(finite_element.CiarletElement): :arg k: The degree. :arg variant: optional variant specifying the types of nodes. - variant can be chosen from ["equispaced", "integral", "integral(quadrature_degree)"] + variant can be chosen from ["point", "integral", "integral(quadrature_degree)"] "point" -> dofs are evaluated by point evaluation. Note that this variant has suboptimal convergence order in the H(div)-norm "integral" -> dofs are evaluated by quadrature rule of degree k. 
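Usage sketch of the renamed option (commentary; it mirrors the strings exercised in test_fiat.py below and assumes this patch series is applied, so the constructors accept variant="point"):

from FIAT import Nedelec, RaviartThomas, reference_element

T = reference_element.ufc_simplex(2)
rt_pt = RaviartThomas(T, 2, variant="point")      # point-evaluation dofs, suboptimal H(div) convergence
rt_int = RaviartThomas(T, 2, variant="integral")  # integral-moment dofs
ned_pt = Nedelec(T, 1, variant="point")
print(rt_pt.space_dimension(), rt_int.space_dimension(), ned_pt.space_dimension())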
diff --git a/test/unit/test_fiat.py b/test/unit/test_fiat.py index 733571527..86e25ce16 100644 --- a/test/unit/test_fiat.py +++ b/test/unit/test_fiat.py @@ -150,12 +150,12 @@ def __init__(self, a, b): 'RaviartThomas(S, 1, variant="integral(2)")', 'RaviartThomas(S, 2, variant="integral(3)")', 'RaviartThomas(S, 3, variant="integral(4)")', - 'RaviartThomas(T, 1, variant="equispaced")', - 'RaviartThomas(T, 2, variant="equispaced")', - 'RaviartThomas(T, 3, variant="equispaced")', - 'RaviartThomas(S, 1, variant="equispaced")', - 'RaviartThomas(S, 2, variant="equispaced")', - 'RaviartThomas(S, 3, variant="equispaced")', + 'RaviartThomas(T, 1, variant="point")', + 'RaviartThomas(T, 2, variant="point")', + 'RaviartThomas(T, 3, variant="point")', + 'RaviartThomas(S, 1, variant="point")', + 'RaviartThomas(S, 2, variant="point")', + 'RaviartThomas(S, 3, variant="point")', "DiscontinuousRaviartThomas(T, 1)", "DiscontinuousRaviartThomas(T, 2)", "DiscontinuousRaviartThomas(T, 3)", @@ -180,12 +180,12 @@ def __init__(self, a, b): 'BrezziDouglasMarini(S, 1, variant="integral(2)")', 'BrezziDouglasMarini(S, 2, variant="integral(3)")', 'BrezziDouglasMarini(S, 3, variant="integral(4)")', - 'BrezziDouglasMarini(T, 1, variant="equispaced")', - 'BrezziDouglasMarini(T, 2, variant="equispaced")', - 'BrezziDouglasMarini(T, 3, variant="equispaced")', - 'BrezziDouglasMarini(S, 1, variant="equispaced")', - 'BrezziDouglasMarini(S, 2, variant="equispaced")', - 'BrezziDouglasMarini(S, 3, variant="equispaced")', + 'BrezziDouglasMarini(T, 1, variant="point")', + 'BrezziDouglasMarini(T, 2, variant="point")', + 'BrezziDouglasMarini(T, 3, variant="point")', + 'BrezziDouglasMarini(S, 1, variant="point")', + 'BrezziDouglasMarini(S, 2, variant="point")', + 'BrezziDouglasMarini(S, 3, variant="point")', "Nedelec(T, 1)", "Nedelec(T, 2)", "Nedelec(T, 3)", @@ -204,12 +204,12 @@ def __init__(self, a, b): 'Nedelec(S, 1, variant="integral(2)")', 'Nedelec(S, 2, variant="integral(3)")', 'Nedelec(S, 3, variant="integral(4)")', - 'Nedelec(T, 1, variant="equispaced")', - 'Nedelec(T, 2, variant="equispaced")', - 'Nedelec(T, 3, variant="equispaced")', - 'Nedelec(S, 1, variant="equispaced")', - 'Nedelec(S, 2, variant="equispaced")', - 'Nedelec(S, 3, variant="equispaced")', + 'Nedelec(T, 1, variant="point")', + 'Nedelec(T, 2, variant="point")', + 'Nedelec(T, 3, variant="point")', + 'Nedelec(S, 1, variant="point")', + 'Nedelec(S, 2, variant="point")', + 'Nedelec(S, 3, variant="point")', "NedelecSecondKind(T, 1)", "NedelecSecondKind(T, 2)", "NedelecSecondKind(T, 3)", @@ -228,12 +228,12 @@ def __init__(self, a, b): 'NedelecSecondKind(S, 1, variant="integral(2)")', 'NedelecSecondKind(S, 2, variant="integral(3)")', 'NedelecSecondKind(S, 3, variant="integral(4)")', - 'NedelecSecondKind(T, 1, variant="equispaced")', - 'NedelecSecondKind(T, 2, variant="equispaced")', - 'NedelecSecondKind(T, 3, variant="equispaced")', - 'NedelecSecondKind(S, 1, variant="equispaced")', - 'NedelecSecondKind(S, 2, variant="equispaced")', - 'NedelecSecondKind(S, 3, variant="equispaced")', + 'NedelecSecondKind(T, 1, variant="point")', + 'NedelecSecondKind(T, 2, variant="point")', + 'NedelecSecondKind(T, 3, variant="point")', + 'NedelecSecondKind(S, 1, variant="point")', + 'NedelecSecondKind(S, 2, variant="point")', + 'NedelecSecondKind(S, 3, variant="point")', "Regge(T, 0)", "Regge(T, 1)", "Regge(T, 2)", From 94c6729e763acfd860efe5b9dc32e8ed67a47320 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Thu, 9 Nov 2023 07:17:54 +0000 Subject: [PATCH 69/76] cleanup --- 
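Note on the cleanup below (commentary, not part of the commit message): the ONPolynomialSet coefficient layout is unchanged, one block of expansion coefficients per component, which is why the scalar case can take coeffs = numpy.eye(num_members) directly. A toy illustration in plain numpy, with sizes picked arbitrarily here:

import numpy as np

num_exp_functions, shape = 4, (2,)
num_members = int(np.prod(shape)) * num_exp_functions
coeffs = np.zeros((num_members, *shape, num_exp_functions))
cur_bf = 0
for idx in np.ndindex(shape):
    for exp_bf in range(num_exp_functions):
        coeffs[(cur_bf, *idx, exp_bf)] = 1.0
        cur_bf += 1
# every member activates exactly one (component, expansion function) pair,
# so flattening the component axes recovers the identity
assert np.allclose(coeffs.reshape(num_members, -1), np.eye(num_members))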
FIAT/expansions.py | 4 ++-- FIAT/polynomial_set.py | 30 ++++++++++++++---------------- 2 files changed, 16 insertions(+), 18 deletions(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index 51bf88fab..c368e9e61 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -83,6 +83,7 @@ def dubiner_recurrence(dim, n, order, ref_pts, jacobian): for codim in range(dim): # Extend the basis from codim to codim + 1 fa, fb, fc, dfa, dfb, dfc = jacobi_factors(*X[codim:codim+3], *dX[codim:codim+3]) + ddfc = 2 * outer(dfb, dfb) for sub_index in reference_element.lattice_iter(0, n, codim): # handle i = 1 icur = idx(*sub_index, 0) @@ -97,7 +98,6 @@ def dubiner_recurrence(dim, n, order, ref_pts, jacobian): dphi[inext] = factor * dphi[icur] + phi[icur] * dfactor if ddphi is not None: ddphi[inext] = factor * ddphi[icur] + sym_outer(dphi[icur], dfactor) - ddfc = 2 * outer(dfb, dfb) # general i by recurrence for i in range(1, n - sum(sub_index)): @@ -174,7 +174,7 @@ def get_num_members(self, n): return math.comb(n + D, D) def _mapping(self, pts): - if isinstance(pts, numpy.ndarray) and len(pts.shape) == 2 and pts.dtype != object: + if isinstance(pts, numpy.ndarray) and len(pts.shape) == 2: return numpy.dot(self.A, pts) + self.b[:, None] else: m1, m2 = self.A.shape diff --git a/FIAT/polynomial_set.py b/FIAT/polynomial_set.py index 4a7eac766..f5d98abe1 100644 --- a/FIAT/polynomial_set.py +++ b/FIAT/polynomial_set.py @@ -127,25 +127,24 @@ def __init__(self, ref_el, degree, shape=tuple()): else: flat_shape = numpy.ravel(shape) num_components = numpy.prod(flat_shape) - num_exp_functions = expansions.polynomial_dimension(ref_el, degree) + expansion_set = expansions.ExpansionSet(ref_el) + num_exp_functions = expansion_set.get_num_members(degree) num_members = num_components * num_exp_functions embedded_degree = degree - expansion_set = expansions.ExpansionSet(ref_el) - - # set up coefficients - coeffs_shape = (num_members,) + shape + (num_exp_functions,) - coeffs = numpy.zeros(coeffs_shape, "d") - - # use functional's index_iterator function - cur_bf = 0 if shape == tuple(): coeffs = numpy.eye(num_members) else: + # set up coefficients + coeffs_shape = (num_members, *shape, num_exp_functions) + coeffs = numpy.zeros(coeffs_shape, "d") + + # use functional's index_iterator function + cur_bf = 0 for idx in index_iterator(shape): n = expansions.polynomial_dimension(ref_el, embedded_degree) for exp_bf in range(n): - cur_idx = (cur_bf,) + tuple(idx) + (exp_bf,) + cur_idx = (cur_bf, *idx, exp_bf) coeffs[cur_idx] = 1.0 cur_bf += 1 @@ -220,24 +219,23 @@ def __init__(self, ref_el, degree, size=None): size = sd shape = (size, size) - num_exp_functions = expansions.polynomial_dimension(ref_el, degree) + expansion_set = expansions.ExpansionSet(ref_el) + num_exp_functions = expansion_set.get_num_members(degree) num_components = size * (size + 1) // 2 num_members = num_components * num_exp_functions embedded_degree = degree - expansion_set = expansions.ExpansionSet(ref_el) # set up coefficients for symmetric tensors - coeffs_shape = (num_members,) + shape + (num_exp_functions,) + coeffs_shape = (num_members, *shape, num_exp_functions) coeffs = numpy.zeros(coeffs_shape, "d") cur_bf = 0 for i, j in index_iterator(shape): - n = expansions.polynomial_dimension(ref_el, embedded_degree) if i == j: - for exp_bf in range(n): + for exp_bf in range(num_exp_functions): coeffs[cur_bf, i, j, exp_bf] = 1.0 cur_bf += 1 elif i < j: - for exp_bf in range(n): + for exp_bf in range(num_exp_functions): coeffs[cur_bf, i, j, exp_bf] = 1.0 
coeffs[cur_bf, j, i, exp_bf] = 1.0 cur_bf += 1 From d3c0a11c83bb1cd6d33f7863ad20a3fac24bb499 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Thu, 9 Nov 2023 17:19:29 +0000 Subject: [PATCH 70/76] Test exponential convergence and for condition number growth --- test/unit/test_gauss_legendre.py | 79 +++++++++++++++--------- test/unit/test_gauss_lobatto_legendre.py | 79 +++++++++++++++--------- 2 files changed, 98 insertions(+), 60 deletions(-) diff --git a/test/unit/test_gauss_legendre.py b/test/unit/test_gauss_legendre.py index 3eb147eb2..fa1ad1aed 100644 --- a/test/unit/test_gauss_legendre.py +++ b/test/unit/test_gauss_legendre.py @@ -33,7 +33,7 @@ def symmetric_simplex(dim): s.vertices = [(0., 1.), (-h, -0.5), (h, -0.5)] elif dim == 3: h = 3.**0.5 / dim - s.vertices = [(-h, h, h), (h, -h, h), (h, h, -h), (h, h, h)] + s.vertices = [(h, -h, -h), (-h, h, -h), (-h, -h, h), (h, h, h)] return s @@ -83,49 +83,68 @@ def test_edge_dofs(dim, degree): assert np.allclose(points[edge_dofs[entity]], np.array(list(map(transform, quadrature_points)))) -@pytest.mark.parametrize("dim, degree", [(1, 128), (2, 32), (3, 16)]) +@pytest.mark.parametrize("dim, degree", [(1, 64), (2, 16), (3, 16)]) def test_interpolation(dim, degree): from FIAT import GaussLegendre, quadrature - from FIAT.polynomial_set import mis - a = 1. + 0.5 - a = 0.5 * a**2 - r2 = lambda x: 0.5 * np.linalg.norm(x, axis=-1)**2 - f = lambda x: np.exp(a / (r2(x) - a)) - df = lambda x: f(x) * (-a*(r2(x) - a)**-2) + # f = Runge radial function + A = 25 + r2 = lambda x: np.linalg.norm(x, axis=-1)**2 + f = lambda x: 1/(1 + A*r2(x)) s = symmetric_simplex(dim) - rule = quadrature.make_quadrature(s, degree + 1) + rule = quadrature.make_quadrature(s, 2*degree+1) points = rule.get_points() - weights = rule.get_weights() - - f_at_pts = {} - f_at_pts[(0,) * dim] = f(points) - df_at_pts = df(points) * points.T - alphas = mis(dim, 1) - for alpha in alphas: - i = next(j for j, aj in enumerate(alpha) if aj > 0) - f_at_pts[alpha] = df_at_pts[i] - - scaleL2 = 1 / np.sqrt(np.dot(weights, f(points)**2)) - scaleH1 = 1 / np.sqrt(np.dot(weights, sum(f_at_pts[alpha]**2 for alpha in f_at_pts))) + f_at_pts = f(points) k = 1 + errors = [] + degrees = [] while k <= degree: fe = GaussLegendre(s, k) - tab = fe.tabulate(1, points) + # interpolate f onto FE space: dual evaluation coefficients = np.array([v(f) for v in fe.dual_basis()]) + # interpolate FE space onto quadrature points + tab = fe.tabulate(0, points)[(0,)*dim] + # compute max error + errors.append(max(abs(f_at_pts - np.dot(coefficients, tab)))) + degrees.append(k) + k *= 2 - alpha = (0,) * dim - err = f_at_pts[alpha] - np.dot(coefficients, tab[alpha]) - errorL2 = scaleL2 * np.sqrt(np.dot(weights, err ** 2)) + errors = np.array(errors) + degrees = np.array(degrees) - err2 = sum((f_at_pts[alpha] - np.dot(coefficients, tab[alpha])) ** 2 for alpha in tab) - errorH1 = scaleH1 * np.sqrt(np.dot(weights, err2)) + # Test for exponential convergence + C = np.sqrt(1/A) + np.sqrt(1+1/A) + assert all(errors < 2.0 * C**-degrees) + + +@pytest.mark.parametrize("degree", [4, 8, 12, 16]) +@pytest.mark.parametrize("dim", [1, 2, 3]) +def test_conditioning(dim, degree): + from FIAT import GaussLegendre, quadrature - assert errorL2 < 2 * max(3*np.exp(-k), 1E-15) - assert errorH1 < 2 * max(3*np.exp(-k+1), 1E-13 if dim == 1 else 1E-11) - k = min(k * 2, k + 16) + s = symmetric_simplex(dim) + rule = quadrature.make_quadrature(s, degree + 1) + points = rule.get_points() + weights = rule.get_weights() + + fe = GaussLegendre(s, degree) + 
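# Aside: cond() defined below discards near-zero eigenvalues because the cell
# stiffness matrix K is only positive semi-definite (constants lie in its
# nullspace); without the filter the ratio would be dominated by round-off.
# A tiny plain-numpy check of that filtering logic, with a matrix chosen here:
import numpy as np

def cond(A):
    a = np.linalg.eigvalsh(A)
    a = a[abs(a) > 1E-12]
    return max(a) / min(a)

K_demo = np.diag([0.0, 1.0, 4.0])     # semi-definite, like a one-cell stiffness matrix
assert np.isclose(cond(K_demo), 4.0)  # the zero mode is ignored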
phi = fe.tabulate(1, points) + v = phi[(0,) * dim] + grads = [phi[alpha] for alpha in phi if sum(alpha) == 1] + M = np.dot(v, weights[:, None] * v.T) + K = sum(np.dot(dv, weights[:, None] * dv.T) for dv in grads) + + def cond(A): + a = np.linalg.eigvalsh(A) + a = a[abs(a) > 1E-12] + return max(a) / min(a) + + kappaM = cond(M) + kappaK = cond(K) + assert kappaM ** (1/degree) < dim + 1 + assert kappaK ** (1/degree) < dim + 2 if __name__ == '__main__': diff --git a/test/unit/test_gauss_lobatto_legendre.py b/test/unit/test_gauss_lobatto_legendre.py index 0641d0975..b976b3121 100644 --- a/test/unit/test_gauss_lobatto_legendre.py +++ b/test/unit/test_gauss_lobatto_legendre.py @@ -33,7 +33,7 @@ def symmetric_simplex(dim): s.vertices = [(0., 1.), (-h, -0.5), (h, -0.5)] elif dim == 3: h = 3.**0.5 / dim - s.vertices = [(-h, h, h), (h, -h, h), (h, h, -h), (h, h, h)] + s.vertices = [(h, -h, -h), (-h, h, -h), (-h, -h, h), (h, h, h)] return s @@ -84,49 +84,68 @@ def test_edge_dofs(dim, degree): assert np.allclose(points[edge_dofs[entity]], np.array(list(map(transform, quadrature_points)))) -@pytest.mark.parametrize("dim, degree", [(1, 128), (2, 32), (3, 16)]) +@pytest.mark.parametrize("dim, degree", [(1, 64), (2, 16), (3, 16)]) def test_interpolation(dim, degree): from FIAT import GaussLobattoLegendre, quadrature - from FIAT.polynomial_set import mis - a = 1. + 0.5 - a = 0.5 * a**2 - r2 = lambda x: 0.5 * np.linalg.norm(x, axis=-1)**2 - f = lambda x: np.exp(a / (r2(x) - a)) - df = lambda x: f(x) * (-a*(r2(x) - a)**-2) + # f = Runge radial function + A = 25 + r2 = lambda x: np.linalg.norm(x, axis=-1)**2 + f = lambda x: 1/(1 + A*r2(x)) s = symmetric_simplex(dim) - rule = quadrature.make_quadrature(s, degree + 1) + rule = quadrature.make_quadrature(s, 2*degree+1) points = rule.get_points() - weights = rule.get_weights() - - f_at_pts = {} - f_at_pts[(0,) * dim] = f(points) - df_at_pts = df(points) * points.T - alphas = mis(dim, 1) - for alpha in alphas: - i = next(j for j, aj in enumerate(alpha) if aj > 0) - f_at_pts[alpha] = df_at_pts[i] - - scaleL2 = 1 / np.sqrt(np.dot(weights, f(points)**2)) - scaleH1 = 1 / np.sqrt(np.dot(weights, sum(f_at_pts[alpha]**2 for alpha in f_at_pts))) + f_at_pts = f(points) k = 1 + errors = [] + degrees = [] while k <= degree: fe = GaussLobattoLegendre(s, k) - tab = fe.tabulate(1, points) + # interpolate f onto FE space: dual evaluation coefficients = np.array([v(f) for v in fe.dual_basis()]) + # interpolate FE space onto quadrature points + tab = fe.tabulate(0, points)[(0,)*dim] + # compute max error + errors.append(max(abs(f_at_pts - np.dot(coefficients, tab)))) + degrees.append(k) + k *= 2 - alpha = (0,) * dim - err = f_at_pts[alpha] - np.dot(coefficients, tab[alpha]) - errorL2 = scaleL2 * np.sqrt(np.dot(weights, err ** 2)) + errors = np.array(errors) + degrees = np.array(degrees) - err2 = sum((f_at_pts[alpha] - np.dot(coefficients, tab[alpha])) ** 2 for alpha in tab) - errorH1 = scaleH1 * np.sqrt(np.dot(weights, err2)) + # Test for exponential convergence + C = np.sqrt(1/A) + np.sqrt(1+1/A) + assert all(errors < 1.5 * C**-degrees) + + +@pytest.mark.parametrize("degree", [4, 8, 12, 16]) +@pytest.mark.parametrize("dim", [1, 2, 3]) +def test_conditioning(dim, degree): + from FIAT import GaussLobattoLegendre, quadrature - assert errorL2 < max(3*np.exp(-k), 1E-15) - assert errorH1 < max(3*np.exp(-k+1), 1E-13 if dim == 1 else 1E-11) - k = min(k * 2, k + 16) + s = symmetric_simplex(dim) + rule = quadrature.make_quadrature(s, degree + 1) + points = rule.get_points() + weights = 
rule.get_weights() + + fe = GaussLobattoLegendre(s, degree) + phi = fe.tabulate(1, points) + v = phi[(0,) * dim] + grads = [phi[alpha] for alpha in phi if sum(alpha) == 1] + M = np.dot(v, weights[:, None] * v.T) + K = sum(np.dot(dv, weights[:, None] * dv.T) for dv in grads) + + def cond(A): + a = np.linalg.eigvalsh(A) + a = a[abs(a) > 1E-12] + return max(a) / min(a) + + kappaM = cond(M) + kappaK = cond(K) + assert kappaM ** (1/degree) < dim + 1 + assert kappaK ** (1/degree) < dim + 2 if __name__ == '__main__': From d5bd55652e7ba28d78b5930e48fc2b12d4035a21 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Thu, 9 Nov 2023 17:44:30 +0000 Subject: [PATCH 71/76] QR on Vandermonde matrix to detect singular system --- FIAT/finite_element.py | 7 ++++--- test/unit/test_gauss_legendre.py | 6 +++--- test/unit/test_gauss_lobatto_legendre.py | 6 +++--- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/FIAT/finite_element.py b/FIAT/finite_element.py index 32381a473..bab2b53de 100644 --- a/FIAT/finite_element.py +++ b/FIAT/finite_element.py @@ -130,9 +130,10 @@ def __init__(self, poly_set, dual, order, formdegree=None, mapping="affine", ref self.V = V # new_coeffs_flat = numpy.linalg.solve(V.T, B) - Q, R = numpy.linalg.qr(A) - VTQ = numpy.dot(B, numpy.transpose(R)) - new_coeffs_flat = numpy.dot(Q, numpy.linalg.solve(VTQ, B)) + Q, R = numpy.linalg.qr(V) + if any(abs(numpy.diag(R)) < 1E-14): + raise numpy.linalg.LinAlgError("Singular Vandermonde matrix") + new_coeffs_flat = numpy.dot(Q, numpy.linalg.solve(R.T, B)) new_shp = new_coeffs_flat.shape[:1] + shp[1:] new_coeffs = new_coeffs_flat.reshape(new_shp) diff --git a/test/unit/test_gauss_legendre.py b/test/unit/test_gauss_legendre.py index fa1ad1aed..0acc9bb4d 100644 --- a/test/unit/test_gauss_legendre.py +++ b/test/unit/test_gauss_legendre.py @@ -85,7 +85,7 @@ def test_edge_dofs(dim, degree): @pytest.mark.parametrize("dim, degree", [(1, 64), (2, 16), (3, 16)]) def test_interpolation(dim, degree): - from FIAT import GaussLegendre, quadrature + from FIAT import GaussLegendre, reference_element # f = Runge radial function A = 25 @@ -93,8 +93,8 @@ def test_interpolation(dim, degree): f = lambda x: 1/(1 + A*r2(x)) s = symmetric_simplex(dim) - rule = quadrature.make_quadrature(s, 2*degree+1) - points = rule.get_points() + points = reference_element.make_lattice(s.get_vertices(), 2*degree+1, variant="gl") + points = np.array(points) f_at_pts = f(points) k = 1 diff --git a/test/unit/test_gauss_lobatto_legendre.py b/test/unit/test_gauss_lobatto_legendre.py index b976b3121..defa77dfb 100644 --- a/test/unit/test_gauss_lobatto_legendre.py +++ b/test/unit/test_gauss_lobatto_legendre.py @@ -86,7 +86,7 @@ def test_edge_dofs(dim, degree): @pytest.mark.parametrize("dim, degree", [(1, 64), (2, 16), (3, 16)]) def test_interpolation(dim, degree): - from FIAT import GaussLobattoLegendre, quadrature + from FIAT import GaussLobattoLegendre, reference_element # f = Runge radial function A = 25 @@ -94,8 +94,8 @@ def test_interpolation(dim, degree): f = lambda x: 1/(1 + A*r2(x)) s = symmetric_simplex(dim) - rule = quadrature.make_quadrature(s, 2*degree+1) - points = rule.get_points() + points = reference_element.make_lattice(s.get_vertices(), 2*degree+1, variant="gl") + points = np.array(points) f_at_pts = f(points) k = 1 From b0173d4c6b4100a04de987941c8af9654966a101 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Thu, 9 Nov 2023 18:33:57 +0000 Subject: [PATCH 72/76] Solve Vandermonde with scipy LU --- FIAT/finite_element.py | 11 ++++++++--- setup.py | 2 +- 2 
files changed, 9 insertions(+), 4 deletions(-) diff --git a/FIAT/finite_element.py b/FIAT/finite_element.py index bab2b53de..500821531 100644 --- a/FIAT/finite_element.py +++ b/FIAT/finite_element.py @@ -9,6 +9,8 @@ # Modified by Thomas H. Gibson (t.gibson15@imperial.ac.uk), 2016 import numpy +import scipy +import warnings from FIAT.dual_set import DualSet from FIAT.polynomial_set import PolynomialSet @@ -130,10 +132,13 @@ def __init__(self, poly_set, dual, order, formdegree=None, mapping="affine", ref self.V = V # new_coeffs_flat = numpy.linalg.solve(V.T, B) - Q, R = numpy.linalg.qr(V) - if any(abs(numpy.diag(R)) < 1E-14): + warnings.filterwarnings("error") + try: + LU, piv = scipy.linalg.lu_factor(V) + new_coeffs_flat = scipy.linalg.lu_solve((LU, piv), B, trans=1) + except scipy.linalg.LinAlgWarning: raise numpy.linalg.LinAlgError("Singular Vandermonde matrix") - new_coeffs_flat = numpy.dot(Q, numpy.linalg.solve(R.T, B)) + warnings.resetwarnings() new_shp = new_coeffs_flat.shape[:1] + shp[1:] new_coeffs = new_coeffs_flat.reshape(new_shp) diff --git a/setup.py b/setup.py index fa5a47dc4..20ba60391 100755 --- a/setup.py +++ b/setup.py @@ -27,4 +27,4 @@ download_url=tarball, license="LGPL v3 or later", packages=["FIAT"], - install_requires=["numpy", "sympy", "recursivenodes"]) + install_requires=["numpy", "recursivenodes", "scipy", "sympy"]) From 05d36c9053b08c6ff2b4d9a66f0e7a6be4d4c7f8 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Thu, 9 Nov 2023 20:16:30 +0000 Subject: [PATCH 73/76] revert num_exp_function to use polynomial_dimension --- FIAT/polynomial_set.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/FIAT/polynomial_set.py b/FIAT/polynomial_set.py index f5d98abe1..8a61aaddc 100644 --- a/FIAT/polynomial_set.py +++ b/FIAT/polynomial_set.py @@ -127,10 +127,10 @@ def __init__(self, ref_el, degree, shape=tuple()): else: flat_shape = numpy.ravel(shape) num_components = numpy.prod(flat_shape) - expansion_set = expansions.ExpansionSet(ref_el) - num_exp_functions = expansion_set.get_num_members(degree) + num_exp_functions = expansions.polynomial_dimension(ref_el, degree) num_members = num_components * num_exp_functions embedded_degree = degree + expansion_set = expansions.ExpansionSet(ref_el) if shape == tuple(): coeffs = numpy.eye(num_members) @@ -219,11 +219,11 @@ def __init__(self, ref_el, degree, size=None): size = sd shape = (size, size) - expansion_set = expansions.ExpansionSet(ref_el) - num_exp_functions = expansion_set.get_num_members(degree) + num_exp_functions = expansions.polynomial_dimension(ref_el, degree) num_components = size * (size + 1) // 2 num_members = num_components * num_exp_functions embedded_degree = degree + expansion_set = expansions.ExpansionSet(ref_el) # set up coefficients for symmetric tensors coeffs_shape = (num_members, *shape, num_exp_functions) From 68c57e8f27b49ce6ed84e82f20d0ab75fd933933 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Thu, 9 Nov 2023 20:44:16 +0000 Subject: [PATCH 74/76] test that DG(point, 1) raises ValueError --- test/unit/test_fiat.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/test/unit/test_fiat.py b/test/unit/test_fiat.py index 86e25ce16..57c0ef5f7 100644 --- a/test/unit/test_fiat.py +++ b/test/unit/test_fiat.py @@ -517,6 +517,15 @@ def test_error_quadrature_degree(element): eval(element) +@pytest.mark.parametrize('element', [ + 'DiscontinuousLagrange(P, 1)', + 'GaussLegendre(P, 1)' +]) +def test_error_point_high_order(element): + with pytest.raises(ValueError): + 
eval(element) + + @pytest.mark.parametrize('cell', [I, T, S]) def test_expansion_orthonormality(cell): from FIAT import expansions, quadrature @@ -545,7 +554,7 @@ def test_expansion_values(dim): dpoints.append(tuple(2*np.array(alpha, dtype="d")/npoints-1)) rpoints.append(tuple(2*sympy.Rational(a, npoints)-1 for a in alpha)) - n = 20 + n = 16 Uvals = U.tabulate(n, dpoints) idx = (lambda p: p, expansions.morton_index2, expansions.morton_index3)[dim-1] eta = sympy.DeferredVector("eta") From f628a01efe9bcdc511ce8a91176756845d30add2 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Thu, 9 Nov 2023 21:04:39 +0000 Subject: [PATCH 75/76] avoid tiny roundoff error showing in CI --- FIAT/expansions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/FIAT/expansions.py b/FIAT/expansions.py index c368e9e61..d45724fbe 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -43,7 +43,7 @@ def pad_jacobian(A, embedded_dim): def jacobi_factors(x, y, z, dx, dy, dz): fb = 0.5 * (y + z) - fa = x + fb + 1.0 + fa = x + (fb + 1.0) fc = fb ** 2 dfa = dfb = dfc = None if dx is not None: From ae0966752da99c6398fc7c209532a2a7f4b0ba44 Mon Sep 17 00:00:00 2001 From: Pablo Brubeck Date: Fri, 10 Nov 2023 18:42:02 +0000 Subject: [PATCH 76/76] Use ONPolynomialSet for equispaced Lagrange --- FIAT/lagrange.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/FIAT/lagrange.py b/FIAT/lagrange.py index 12816ad0a..540894f5e 100644 --- a/FIAT/lagrange.py +++ b/FIAT/lagrange.py @@ -7,6 +7,8 @@ from FIAT import finite_element, polynomial_set, dual_set, functional from FIAT.orientation_utils import make_entity_permutations_simplex +from FIAT.barycentric_interpolation import LagrangePolynomialSet +from FIAT.reference_element import LINE class LagrangeDualSet(dual_set.DualSet): @@ -14,7 +16,7 @@ class LagrangeDualSet(dual_set.DualSet): simplices of any dimension. Nodes are point evaluation at equispaced points.""" - def __init__(self, ref_el, degree, variant=None): + def __init__(self, ref_el, degree, variant="equispaced"): entity_ids = {} nodes = [] entity_permutations = {} @@ -44,8 +46,18 @@ def __init__(self, ref_el, degree, variant=None): class Lagrange(finite_element.CiarletElement): """The Lagrange finite element. It is what it is.""" - def __init__(self, ref_el, degree): - poly_set = polynomial_set.ONPolynomialSet(ref_el, degree) - dual = LagrangeDualSet(ref_el, degree) + def __init__(self, ref_el, degree, variant="equispaced"): + dual = LagrangeDualSet(ref_el, degree, variant=variant) + if ref_el.shape == LINE and variant != "equispaced": + # In 1D we can use the primal basis as the expansion set, + # avoiding any round-off coming from a basis transformation + points = [] + for node in dual.nodes: + # Assert singleton point for each node. + pt, = node.get_point_dict().keys() + points.append(pt) + poly_set = LagrangePolynomialSet(ref_el, points) + else: + poly_set = polynomial_set.ONPolynomialSet(ref_el, degree) formdegree = 0 # 0-form super(Lagrange, self).__init__(poly_set, dual, degree, formdegree)
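Closing aside: the 1D branch added in the last commit avoids the Vandermonde solve that the earlier commits guard with QR and LU factorizations, because a nodal Lagrange basis can be evaluated directly by barycentric interpolation. A minimal standalone sketch of that idea in plain numpy; this is not FIAT's LagrangePolynomialSet, only the underlying formula, with a node set chosen arbitrarily here:

import numpy as np

def barycentric_weights(nodes):
    # w_j = 1 / prod_{k != j} (x_j - x_k)
    diff = nodes[:, None] - nodes[None, :]
    np.fill_diagonal(diff, 1.0)
    return 1.0 / np.prod(diff, axis=1)

def lagrange_tabulate(nodes, x):
    # phi[j, i] = j-th nodal basis function at x[i]; for this sketch x must avoid the nodes
    w = barycentric_weights(nodes)
    c = w[:, None] / (x[None, :] - nodes[:, None])
    return c / c.sum(axis=0)

nodes = np.cos(np.pi * np.arange(6) / 5)   # Chebyshev-Lobatto points on [-1, 1]
x = np.linspace(-0.99, 0.99, 11)
phi = lagrange_tabulate(nodes, x)
f = lambda t: 3*t**5 - t**2 + 0.5
assert np.allclose(phi.sum(axis=0), 1.0)   # partition of unity
assert np.allclose(f(nodes) @ phi, f(x))   # degree-5 polynomials are reproduced exactly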