diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 000000000..15c4bb309 --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,32 @@ +name: github pages + +on: + push: + branches: + - master + +jobs: + build_docs: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Setup python + uses: actions/setup-python@v3 + with: + python-version: 3.12 + - name: Install + run: | + python -m pip install .[doc] + - name: Build docs + run: | + make -C docs html + - name: Deploy + uses: peaceiris/actions-gh-pages@v3 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + publish_dir: ./docs/build/html + publish_branch: gh-pages + enable_jekyll: false + allow_empty_commit: false + user_name: 'github-actions[bot]' + user_email: 'github-actions[bot]@users.noreply.github.com' diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml index ce154290f..45581cf42 100644 --- a/.github/workflows/pythonapp.yml +++ b/.github/workflows/pythonapp.yml @@ -4,7 +4,11 @@ name: FIAT CI -on: [push, pull_request] +on: + push: + branches: + - master + pull_request: jobs: build: @@ -12,7 +16,7 @@ jobs: strategy: matrix: os: [ubuntu-latest, macos-latest] - python-version: ['3.9', '3.10', '3.11', '3.12'] + python-version: ['3.10', '3.11', '3.12', '3.13'] steps: - uses: actions/checkout@v4 @@ -28,12 +32,14 @@ jobs: run: | python -m pip install pydocstyle python -m pydocstyle . - - name: Install FIAT - run: pip install . - - name: Test with pytest + - name: Install FIAT and CI dependencies run: | - python -m pip install coveralls pytest pytest-cov pytest-xdist - DATA_REPO_GIT="" python -m pytest --cov=FIAT/ test/ + python -m pip install '.[test]' + python -m pip install coveralls pytest-cov + - name: Test FIAT + run: DATA_REPO_GIT="" python -m pytest --cov=FIAT/ test/FIAT + - name: Test FInAT + run: DATA_REPO_GIT="" python -m pytest --cov=finat/ --cov=gem/ test/finat - name: Coveralls if: ${{ github.repository == 'FEniCS/fiat' && github.head_ref == '' && matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11' }} env: diff --git a/.gitignore b/.gitignore index 26346c53d..b41093856 100644 --- a/.gitignore +++ b/.gitignore @@ -8,4 +8,8 @@ /doc/sphinx/source/api-doc release/ -/doc/sphinx/source/_build/ \ No newline at end of file +/docs/build/ +/docs/source/FIAT.rst +/docs/source/finat.rst +/docs/source/finat.ufl.rst +/docs/source/gem.rst diff --git a/doc/sphinx/requirements.txt b/doc/sphinx/requirements.txt deleted file mode 100644 index 0a3d875fb..000000000 --- a/doc/sphinx/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -numpy -sympy -sphinx==1.7.0 diff --git a/doc/sphinx/Makefile b/docs/Makefile similarity index 97% rename from doc/sphinx/Makefile rename to docs/Makefile index 995d1834e..60333b3ae 100644 --- a/doc/sphinx/Makefile +++ b/docs/Makefile @@ -49,7 +49,12 @@ help: clean: rm -rf $(BUILDDIR)/* -html: +apidoc: + sphinx-apidoc -f -T -o source/ ../FIAT + sphinx-apidoc -f -T -o source/ ../finat + sphinx-apidoc -f -T -o source/ ../gem + +html: apidoc $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 
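Note on the new `apidoc` target above: it shells out to sphinx-apidoc for the FIAT, finat and gem packages, replacing the `run_apidoc` builder-inited hook that this same patch removes from docs/source/conf.py (see the conf.py hunk further down). As a minimal, illustrative sketch only — assuming Sphinx is installed and the working directory is docs/ — the equivalent programmatic call would look like:

    from sphinx.ext.apidoc import main  # same entry point the removed conf.py hook imported

    for package in ("../FIAT", "../finat", "../gem"):
        # -f overwrites existing .rst stubs, -T suppresses the modules.rst table of contents,
        # -o source/ writes the stubs next to conf.py, matching the Makefile target above
        main(["-f", "-T", "-o", "source/", package])

The generated .rst stubs are the files ignored by the new /docs/source/*.rst entries added to .gitignore above.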
diff --git a/docs/images/logo.png b/docs/images/logo.png new file mode 100644 index 000000000..0eab2ed5c Binary files /dev/null and b/docs/images/logo.png differ diff --git a/docs/images/logo.svg b/docs/images/logo.svg new file mode 100644 index 000000000..2151002a4 --- /dev/null +++ b/docs/images/logo.svg @@ -0,0 +1,194 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + φ + 0 + + + + diff --git a/docs/source/_themes/finat/README b/docs/source/_themes/finat/README new file mode 100644 index 000000000..3e4bdedc9 --- /dev/null +++ b/docs/source/_themes/finat/README @@ -0,0 +1,4 @@ +This is the Sphinx theme for the FInAT web page +It was originally based on the Firedrake web page, +which was based on the dolfin-adjoint theme, +which was in turn based oh the FEniCS project theme. diff --git a/docs/source/_themes/finat/layout.html b/docs/source/_themes/finat/layout.html new file mode 100644 index 000000000..9b22d0f09 --- /dev/null +++ b/docs/source/_themes/finat/layout.html @@ -0,0 +1,41 @@ +{% extends "basic/layout.html" %} + +{% block extrahead %} + + + + +{% if theme_favicon %} + +{% endif %} +{% endblock %} + +{# override upper relbar to show our navigation menu #} +{% block relbar1 %} +
+ FInAT Project Banner +
+ +
+
+{% endblock %} + +{# do not display lower relbar #} +{% block relbar2 %}{% endblock %} + +{# do not display sidebars #} +{% block sidebar1 %}{% endblock %} +{% block sidebar2 %}{% endblock %} diff --git a/docs/source/_themes/finat/static/banner.png b/docs/source/_themes/finat/static/banner.png new file mode 100644 index 000000000..46d02b19e Binary files /dev/null and b/docs/source/_themes/finat/static/banner.png differ diff --git a/docs/source/_themes/finat/static/banner.svg b/docs/source/_themes/finat/static/banner.svg new file mode 100644 index 000000000..076bacc2f --- /dev/null +++ b/docs/source/_themes/finat/static/banner.svg @@ -0,0 +1,1396 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + φ + 0 + + + + + + + FInAT + + + diff --git a/docs/source/_themes/finat/static/dialog-note.png b/docs/source/_themes/finat/static/dialog-note.png new file mode 100644 index 000000000..263fbd586 Binary files /dev/null and b/docs/source/_themes/finat/static/dialog-note.png differ diff --git a/docs/source/_themes/finat/static/dialog-seealso.png b/docs/source/_themes/finat/static/dialog-seealso.png new file mode 100644 index 000000000..3eb7b05c8 Binary files /dev/null and b/docs/source/_themes/finat/static/dialog-seealso.png differ diff --git a/docs/source/_themes/finat/static/dialog-topic.png b/docs/source/_themes/finat/static/dialog-topic.png new file mode 100644 index 000000000..2ac57475c Binary files /dev/null and b/docs/source/_themes/finat/static/dialog-topic.png differ diff --git a/docs/source/_themes/finat/static/dialog-warning.png b/docs/source/_themes/finat/static/dialog-warning.png new file mode 100644 index 000000000..7233d45d8 Binary files /dev/null and b/docs/source/_themes/finat/static/dialog-warning.png differ diff --git a/docs/source/_themes/finat/static/epub.css b/docs/source/_themes/finat/static/epub.css new file mode 100644 index 000000000..28dff738b --- /dev/null +++ b/docs/source/_themes/finat/static/epub.css @@ -0,0 +1,310 @@ +/* + * default.css_t + * ~~~~~~~~~~~~~ + * + * Sphinx stylesheet -- default theme. + * + * :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +body { + font-family: {{ theme_bodyfont }}; + font-size: 100%; + background-color: {{ theme_footerbgcolor }}; + color: #000; + margin: 0; + padding: 0; +} + +div.document { + background-color: {{ theme_sidebarbgcolor }}; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 230px; +} + +div.body { + background-color: {{ theme_bgcolor }}; + color: {{ theme_textcolor }}; + padding: 0 20px 30px 20px; +} + +{%- if theme_rightsidebar|tobool %} +div.bodywrapper { + margin: 0 230px 0 0; +} +{%- endif %} + +div.footer { + color: {{ theme_footertextcolor }}; + width: 100%; + padding: 9px 0 9px 0; + text-align: center; + font-size: 75%; +} + +div.footer a { + color: {{ theme_footertextcolor }}; + text-decoration: underline; +} + +div.related { + background-color: {{ theme_relbarbgcolor }}; + line-height: 30px; + color: {{ theme_relbartextcolor }}; +} + +div.related a { + color: {{ theme_relbarlinkcolor }}; +} + +div.sphinxsidebar { + {%- if theme_stickysidebar|tobool %} + top: 30px; + bottom: 0; + margin: 0; + position: fixed; + overflow: auto; + height: auto; + {%- endif %} + {%- if theme_rightsidebar|tobool %} + float: right; + {%- if theme_stickysidebar|tobool %} + right: 0; + {%- endif %} + {%- endif %} +} + +{%- if theme_stickysidebar|tobool %} +/* this is nice, but it it leads to hidden headings when jumping + to an anchor */ +/* +div.related { + position: fixed; +} + +div.documentwrapper { + margin-top: 30px; +} +*/ +{%- endif %} + +div.sphinxsidebar h3 { + font-family: {{ theme_headfont }}; + color: {{ theme_sidebartextcolor }}; + font-size: 1.4em; + font-weight: normal; + margin: 0; + padding: 0; +} + +div.sphinxsidebar h3 a { + color: {{ theme_sidebartextcolor }}; +} + +div.sphinxsidebar h4 { + font-family: {{ theme_headfont }}; + color: {{ theme_sidebartextcolor }}; + font-size: 1.3em; + font-weight: normal; + margin: 5px 0 0 0; + padding: 0; +} + +div.sphinxsidebar p { + color: {{ theme_sidebartextcolor }}; +} + +div.sphinxsidebar p.topless { + margin: 5px 10px 10px 10px; +} + +div.sphinxsidebar ul { + margin: 10px; + padding: 0; + color: {{ theme_sidebartextcolor }}; +} + +div.sphinxsidebar a { + color: {{ theme_sidebarlinkcolor }}; +} + +div.sphinxsidebar input { + border: 1px solid {{ theme_sidebarlinkcolor }}; + font-family: sans-serif; + font-size: 1em; +} + +{% if theme_collapsiblesidebar|tobool %} +/* for collapsible sidebar */ +div#sidebarbutton { + background-color: {{ theme_sidebarbtncolor }}; +} +{% endif %} + +/* -- hyperlink styles ------------------------------------------------------ */ + +a { + color: {{ theme_linkcolor }}; + text-decoration: none; +} + +a:visited { + color: {{ theme_visitedlinkcolor }}; + text-decoration: none; +} + +a:hover { + text-decoration: underline; +} + +{% if theme_externalrefs|tobool %} +a.external { + text-decoration: none; + border-bottom: 1px dashed {{ theme_linkcolor }}; +} + +a.external:hover { + text-decoration: none; + border-bottom: none; +} + +a.external:visited { + text-decoration: none; + border-bottom: 1px dashed {{ theme_visitedlinkcolor }}; +} +{% endif %} + +/* -- body styles ----------------------------------------------------------- */ + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + font-family: {{ theme_headfont }}; + background-color: {{ theme_headbgcolor }}; + font-weight: normal; + color: {{ theme_headtextcolor }}; + 
border-bottom: 1px solid #ccc; + margin: 20px -20px 10px -20px; + padding: 3px 0 3px 10px; +} + +div.body h1 { margin-top: 0; font-size: 200%; } +div.body h2 { font-size: 160%; } +div.body h3 { font-size: 140%; } +div.body h4 { font-size: 120%; } +div.body h5 { font-size: 110%; } +div.body h6 { font-size: 100%; } + +a.headerlink { + color: {{ theme_headlinkcolor }}; + font-size: 0.8em; + padding: 0 4px 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + background-color: {{ theme_headlinkcolor }}; + color: white; +} + +div.body p, div.body dd, div.body li { + text-align: justify; + line-height: 130%; +} + +div.admonition p.admonition-title + p { + display: inline; +} + +div.admonition p { + margin-bottom: 5px; +} + +div.admonition pre { + margin-bottom: 5px; +} + +div.admonition ul, div.admonition ol { + margin-bottom: 5px; +} + +div.note { + background-color: #eee; + border: 1px solid #ccc; +} + +div.seealso { + background-color: #ffc; + border: 1px solid #ff6; +} + +div.topic { + background-color: #eee; +} + +div.warning { + background-color: #ffe4e4; + border: 1px solid #f66; +} + +p.admonition-title { + display: inline; +} + +p.admonition-title:after { + content: ":"; +} + +pre { + padding: 5px; + background-color: {{ theme_codebgcolor }}; + color: {{ theme_codetextcolor }}; + line-height: 120%; + border: 1px solid #ac9; + border-left: none; + border-right: none; +} + +tt { + background-color: #ecf0f3; + padding: 0 1px 0 1px; + font-size: 0.95em; +} + +th { + background-color: #ede; +} + +.warning tt { + background: #efc2c2; +} + +.note tt { + background: #d6d6d6; +} + +.viewcode-back { + font-family: {{ theme_bodyfont }}; +} + +div.viewcode-block:target { + background-color: #f4debf; + border-top: 1px solid #ac9; + border-bottom: 1px solid #ac9; +} diff --git a/docs/source/_themes/finat/static/feature-item-1.png b/docs/source/_themes/finat/static/feature-item-1.png new file mode 100644 index 000000000..eac0e327a Binary files /dev/null and b/docs/source/_themes/finat/static/feature-item-1.png differ diff --git a/docs/source/_themes/finat/static/featured.css b/docs/source/_themes/finat/static/featured.css new file mode 100644 index 000000000..601103225 --- /dev/null +++ b/docs/source/_themes/finat/static/featured.css @@ -0,0 +1,74 @@ +#products_example { + width:400px; + height:282px; + position:relative; + float:left; +} + +/* + Slideshow +*/ + +#products { +} + +/* + Slides container + Important: + Set the width of your slides container + Set to display none, prevents content flash +*/ + +#products .slides_container { + width:366px; + overflow:hidden; + float:left; + position:relative; + display:none; +} + +/* + Each slide + Important: + Set the width of your slides + If height not specified height will be set by the slide content + Set to display block +*/ + +.slides_container a { + width:366px; + height:274px; + display:block; +} + +/* + Caption +*/ + +.caption { + letter-spacing:0; + position:relative; + text-shadow:0 1px 0 rgba(255,255,255,.8); + -webkit-font-smoothing: subpixel-antialiased; + z-index:500; + position:absolute; + bottom:-35px; + left:0; + height:30px; + padding-left: 10px; + padding-bottom: 20px; + background:#000; + background:rgba(0,0,0,.5); + width:400px; + font-size:1.0em; + line-height:1.33; + color:#fff; + border-top:1px solid #000; + text-shadow:none; +} + +.slides_container div.slide { + width:400px; + height:282px; + display:block; +} \ No newline at end of file diff --git a/docs/source/_themes/finat/static/feed-icon-14x14.gif 
b/docs/source/_themes/finat/static/feed-icon-14x14.gif new file mode 100644 index 000000000..99cd0c6b7 Binary files /dev/null and b/docs/source/_themes/finat/static/feed-icon-14x14.gif differ diff --git a/docs/source/_themes/finat/static/fenics-book-icon.png b/docs/source/_themes/finat/static/fenics-book-icon.png new file mode 100644 index 000000000..b3010c3e9 Binary files /dev/null and b/docs/source/_themes/finat/static/fenics-book-icon.png differ diff --git a/docs/source/_themes/finat/static/fenics-web.png b/docs/source/_themes/finat/static/fenics-web.png new file mode 100644 index 000000000..32880f955 Binary files /dev/null and b/docs/source/_themes/finat/static/fenics-web.png differ diff --git a/docs/source/_themes/finat/static/fenics.css_t b/docs/source/_themes/finat/static/fenics.css_t new file mode 100644 index 000000000..87f75eba2 --- /dev/null +++ b/docs/source/_themes/finat/static/fenics.css_t @@ -0,0 +1,674 @@ +/* + * fenics.css_t + * ~~~~~~~~~~~~ + * + * Sphinx stylesheet -- FEniCS theme. + * This is a modified version of the pylons theme. + * + * :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +@import url("basic.css"); +@import url("nobile-fontfacekit/stylesheet.css"); +@import url("neuton-fontfacekit/stylesheet.css"); + +/* -- page layout ----------------------------------------------------------- */ + +body { + font-family: "Nobile", sans-serif; + font-size: 14px; + background-color: #ffffff; + color: #ffffff; + margin: 0; padding: 0; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + width: 950px; + margin-left: auto; + margin-right: auto; +} + +hr { + border: 1px solid #B1B4B6; +} + +div.document { + background-color: #fff; +} + +div.header{ + width:100%; + height:230px; + background: #f98131 url(headerbg.png) repeat-x 0 top; +} + +div.header-small{ + width:100%; + height:60px; + background: #f98131 url(headerbg.png) repeat-x 0 top; + border-bottom: 2px solid #ffffff; +} + +div.logo { + text-align: center; + padding-top: 50px; +} + +div.logo-small { + text-align: left; + padding: 10px 0 0 250px; +} + +div.body { + background-color: #ffffff; + color: #3E4349; + padding: 0 30px 30px 30px; + font-size: 14px; + overflow: auto; +} + +div.footer { + color: #aaa; + width: 100%; + padding: 13px 0; + text-align: center; + font-size: 13px; + background: #000; + clear:both; +} + +div.footer a { + color: #aaa; + text-decoration: none; +} + +div.footer a:hover { + color: #fff; +} + +div.related { + line-height: 30px; + color: #373839; + font-size: 12px; + background-color: #eee; +} + +div.related a { + color: #1b61d6; +} + +div.related ul { + padding-left: 240px; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar { + margin: 0 0 0.5em 1em; + border: 2px solid #000080; + background-color: #eee; + width: 300px; + float: right; + border-right-style: none; + border-left-style: none; + padding: 0px 0px; +} + +div.commit { + clear: both; + padding: 4px; +} + +div.commit.even { + background-color: #ddd; +} + +img.commit-author-img { + height: 48px; + width: 48px; + float: left; + margin-right: 10px; + display: none; +} + +em.property { + display: none; +} + +a.commit-link { + font-weight: bold; + padding-bottom: 4px; +} + +pre.commit { + clear: both; + font-size: 12px; +} + +p.sidebar-title { + font-weight: bold; +} + +div.youtube{ + text-align: center; +} + +/* -- body styles ----------------------------------------------------------- */ 
+ +a, a .pre { + color: #000080; + text-decoration: none; + word-wrap: break-word; +} + +a:hover, a:hover .pre { + text-decoration: underline; +} + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + font-family: "NeutonRegular", sans-serif; + background-color: #ffffff; + font-weight: normal; + color: #222; + margin: 30px 0px 10px 0px; + padding: 5px 0; +} + +div.body h1 { + font-size: 2.2em; + line-height: 1; + margin-bottom: 0.4em; +} +div.body h2 { + font-size: 1.6em; + line-height: 1; + margin-bottom: 0.6em; +} +div.body h3 { + font-size: 1.2em; + line-height: 1; + margin-bottom: 0.8em; +} +div.body h4 { + font-size: 0.96em; + line-height: 1; + margin-bottom: 1.0em; +} +div.body h5 { + font-size: 0.8em; + font-weight: bold; + margin-bottom: 1.4em; +} +div.body h6 { + font-size: 0.6em; + font-weight: bold; + margin-bottom: 1.6em; +} + +a.headerlink { + color: #1b61d6; + font-size: 12px; + padding: 0 4px 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + text-decoration: underline; +} + +div.body p, div.body dd, div.body li { + line-height: 1.5em; +} + +div.admonition p.admonition-title + p { + display: inline; +} + +div.highlight{ + background-color: white; +} + +div.note { + border: 2px solid #7a9eec; + border-right-style: none; + border-left-style: none; + padding: 10px 20px 10px 60px; + background: #e1ecfe url(dialog-note.png) no-repeat 10px 8px; +} + +#finat-package #id1 { + display: none; +} + +#finat-package h2 { + background-color: #fafafa; + border: 2px solid #000080; + border-right-style: none; + border-left-style: none; + padding: 10px 20px 10px 60px; +} + +#finat-package div.section>dl { + border: 2px solid #ddd; + border-right-style: none; + border-bottom-style: none; +} + +div.seealso { + background: #fff6bf url(dialog-seealso.png) no-repeat 10px 8px; + border: 2px solid #ffd324; + border-left-style: none; + border-right-style: none; + padding: 10px 20px 10px 60px; +} + +div.topic { + background: #eeeeee; + border: 2px solid #C6C9CB; + padding: 10px 20px; + border-right-style: none; + border-left-style: none; +} + +div.warning { + background: #fbe3e4 url(dialog-warning.png) no-repeat 10px 8px; + border: 2px solid #fbc2c4; + border-right-style: none; + border-left-style: none; + padding: 10px 20px 10px 60px; +} + +p.admonition-title { + display: none; +} + +p.admonition-title:after { + content: ":"; +} + +pre, code { + font-family: Consolas, "andale mono", "lucida console", monospace; + font-size: 14px; + line-height: 17px; + background-color: #fafafa; + padding: 10px; + margin: 0; +} + +tt { + background-color: transparent; + color: #222; + font-size: 14px; + font-family: Consolas, "andale mono", "lucida console", monospace; +} + +h2 tt.py-mod { + font-size: 1em; +} + +.viewcode-back { + font-family: "Nobile", sans-serif; +} + +div.viewcode-block:target { + background-color: #fff6bf; + border: 2px solid #ffd324; + border-left-style: none; + border-right-style: none; + padding: 10px 20px; +} + +table.imagegrid { + width: 100%; +} + +table.imagegrid td{ + text-align: center; +} + +table.docutils td { + padding: 0.5em; +} + +table.highlighttable { + width: 100%; + clear: both; +} + +table.highlighttable td { + padding: 0; +} + +a em.std-term { + color: #007f00; +} + +a:hover em.std-term { + text-decoration: underline; +} + +.download { + font-family: "Nobile", sans-serif; + font-weight: normal; + font-style: normal; +} + +tt.xref { + font-weight: normal; + font-style: normal; +} + +#access { + background: #000; + display: block; + 
float: left; + width: 100%; +} +#access .menu-header, +div.menu { + font-size: 13px; + margin-right: 20px; + width: 100%; +} +#access .menu-header ul, +div.menu ul { + float:right; + margin-right:10px; + list-style: none; + margin: 0; +} +#access .menu-header li, +div.menu li { + float: left; + position: relative; +} +#access a { + color: #aaa; + display: block; + line-height: 38px; + padding: 0 10px; + text-decoration: none; +} +#access ul ul { + box-shadow: 0px 3px 3px rgba(0,0,0,0.2); + -moz-box-shadow: 0px 3px 3px rgba(0,0,0,0.2); + -webkit-box-shadow: 0px 3px 3px rgba(0,0,0,0.2); + display: none; + position: absolute; + top: 38px; + left: 0; + float: left; + width: 180px; + z-index: 99999; +} +#access ul ul li { + min-width: 180px; +} +#access ul ul ul { + left: 100%; + top: 0; +} +#access ul ul a { + background: #333; + line-height: 1em; + padding: 10px; + width: 160px; + height: auto; +} +#access li:hover > a, +#access ul ul :hover > a { + background: #333; + color: #fff; +} +#access ul li:hover > ul { + display: block; +} +#access ul li.current_page_item > a, +#access ul li.current-menu-ancestor > a, +#access ul li.current-menu-item > a, +#access ul li.current-menu-parent > a { + color: #fff; +} +* html #access ul li.current_page_item a, +* html #access ul li.current-menu-ancestor a, +* html #access ul li.current-menu-item a, +* html #access ul li.current-menu-parent a, +* html #access ul li a:hover { + color: #fff; +} + +.wrapper { + margin: 0 auto; + width: 900px; +} + +/* =Leader and Front Page Styles +-------------------------------------------------------------- */ + +#leader { + border-bottom:1px solid #ccc; + padding:30px 0 40px 0; +} +#leader-container { + margin:0 auto; + overflow:hidden; + position:relative; + width:870px; +} +#leader .entry-title { + font-size:40px; + line-height:45px; + margin-top:-8px; + padding:0 0 14px 0; +} +#leader .entry-title span { + font-family:Georgia,serif; + font-weight:normal; + font-style:italic; +} +.single #leader .entry-title { + width:652px; +} +#leader .entry-meta { + position:absolute; + top:15px; + left:690px; +} + +#container, +#content { + margin:0; + padding:0; + width:100%; +} +#container { + margin-top:-21px; +} + +ul#recent-items{ +padding-left: 1em; +} + +#sub-feature { + font-size:13px; + line-height:18px; + position:relative; + overflow:hidden; +} +#sub-feature p { + margin:0 0 18px 0; +} + +#sub-feature h3 img { + position:absolute; + top:3px; + right:0; +} +.block { + float:left; + width:400px; +} +#front-block-1 { + margin-right:20px; +} + +.block .avatar { + float:left; + margin:.55em 5px 0 0; +} +.block .avatar-84 { + margin:.25em 10px 0 0; +} + +.block ul { + border-top:1px solid #ccc; + list-style:none; + margin:0; +} +.block ul li { + display:inline; +} +.block ul li a { + border-bottom:1px solid #ccc; + color:#667; + display:block; + padding:6px 0; + text-decoration:none; +} +.block ul li a:hover, +.block ul li a:active { + background:#fafafa; + color: #FF4B33; +} +.page .entry-content, +.single .entry-content { + padding-top:0; +} + +/* =Global Elements +-------------------------------------------------------------- */ + +#buttons { + padding:.75em 0; +} +a.button { + border:1px solid #ccc; + -webkit-border-radius: .7em; + -moz-border-radius: .7em; + border-radius: .7em; + color:#667; + font-size:13px; + margin:0 10px 0 0; + padding:.75em 1.25em; + text-decoration:none; +} +a.button:hover, +a.button:active { + color: #FF4B33; +} + +.footer-nav { + text-align:left; + height:9em; +} +.footer-nav h4 { + margin-left: 
2em; + margin-top: 0; + margin-bottom: 0; +} +div.span-6 { + display:inline; + float:left; + margin-right:10px; + margin-left: 20px; +} +div.last { + float:right; +} + +.footer-nav ul li{ + padding: 0.2em; +} + +.search { +margin-left:20px; +margin-top: 1em; +} + +@media print { +div.wrapper { +display: none; +} +} + +.mugshot +{ + padding-right: 1em; + height: 19em; + padding-left: 1em; +} + +.mugshot img +{ + width: 100px; + height: 150px; + border: solid 1px #667; +} + +.app { + height: 20em; + margin-right: 1em; +} + +#the-finat-team td { + text-align: center; +} + +/* Hack to disable the travis feed logo on the obtaining pyop2 page. */ +img[alt="build status"] { + display: none; +} + +/* Style elements controlling the RSS feeds */ + +/*Container*/ +/*Header*/ +#header .feed_title { + margin:10px 0px 10px 0px; + /*padding:10px 5px 50px 5px;*/ + font-weight:bold; + font-size:16px; +} + +.feed_item { + margin:5px 0px 5px 0px; + background: #eee; + color: #3E4349; +} + +.feed_item_date { + font-size: 12px; +} + +/* Equispace the logos. */ +#finat-is-supported-by tr { + width: 100%; +} + +/* Equispace the logos. */ +#finat-is-supported-by td { + width: 25%; + text-align: center; +} diff --git a/docs/source/_themes/finat/static/footerbg.png b/docs/source/_themes/finat/static/footerbg.png new file mode 100644 index 000000000..1fbc873da Binary files /dev/null and b/docs/source/_themes/finat/static/footerbg.png differ diff --git a/docs/source/_themes/finat/static/headerbg.png b/docs/source/_themes/finat/static/headerbg.png new file mode 100644 index 000000000..ccec153a1 Binary files /dev/null and b/docs/source/_themes/finat/static/headerbg.png differ diff --git a/docs/source/_themes/finat/static/icon.ico b/docs/source/_themes/finat/static/icon.ico new file mode 100644 index 000000000..a9cbeca07 Binary files /dev/null and b/docs/source/_themes/finat/static/icon.ico differ diff --git a/docs/source/_themes/finat/static/icon.png b/docs/source/_themes/finat/static/icon.png new file mode 100644 index 000000000..b4eae264a Binary files /dev/null and b/docs/source/_themes/finat/static/icon.png differ diff --git a/docs/source/_themes/finat/static/ie6.css b/docs/source/_themes/finat/static/ie6.css new file mode 100644 index 000000000..74baa5d5a --- /dev/null +++ b/docs/source/_themes/finat/static/ie6.css @@ -0,0 +1,7 @@ +* html img, +* html .png{position:relative;behavior:expression((this.runtimeStyle.behavior="none")&&(this.pngSet?this.pngSet=true:(this.nodeName == "IMG" && this.src.toLowerCase().indexOf('.png')>-1?(this.runtimeStyle.backgroundImage = "none", +this.runtimeStyle.filter = "progid:DXImageTransform.Microsoft.AlphaImageLoader(src='" + this.src + "',sizingMethod='image')", +this.src = "_static/transparent.gif"):(this.origBg = this.origBg? 
this.origBg :this.currentStyle.backgroundImage.toString().replace('url("','').replace('")',''), +this.runtimeStyle.filter = "progid:DXImageTransform.Microsoft.AlphaImageLoader(src='" + this.origBg + "',sizingMethod='crop')", +this.runtimeStyle.backgroundImage = "none")),this.pngSet=true) +);} diff --git a/docs/source/_themes/finat/static/jquery.latest-commit.js b/docs/source/_themes/finat/static/jquery.latest-commit.js new file mode 100644 index 000000000..73973b765 --- /dev/null +++ b/docs/source/_themes/finat/static/jquery.latest-commit.js @@ -0,0 +1,38 @@ +// Based on http://github.com/skulbuny/jquery.latest-commit.js by Sean Clayton +jQuery(document).ready(function($){ + $('.latest-commit').each(function(){ // Attach to any div/section/whatever with this class + var $container = $(this), $commit, + repo = $container.data('github'), + username = repo.split('/')[0], + repoName = repo.split('/')[1], + userUrl = "http://github.com/" + username, // Gets your user url + repoUrl = "http://github.com/" + username + '/' + repoName; // Gets your repo url + $.ajax({ + url: 'https://api.github.com/repos/' + repo + '/commits?per_page=' + $container.data('commits'), + dataType: 'jsonp', + success: function(results) { + for (i = 0; i < results.data.length; ++i) { + $commit = $( + '
' + // Needs to be wrapped. + // ADD DESIRED CLASSES TO HTML TAGS BELOW! + '' + // Commit author image + '
' + + '
' + // First line of commit message + '' + // Link to commit author + ' authored at ' + // Outputs the commit date + '
' + + '
' + ); + var repo = results.data[i]; + var commitUrl = repo.html_url; // Grabs URL of the commit + $commit.find('.commit-author-img').attr('src', repo.author.avatar_url); // Add commit author avatar image + $commit.find('.commit-link').attr('href',commitUrl).text(repo.commit.message.split("\n")[0]); // Adds link to commit and commit SHA + $commit.find('.commit-author').attr('href', repo.author.html_url).text(repo.commit.author.name); // Outputs commit author name + $commit.find('.commit-date').text(new Date(repo.commit.author.date).toLocaleString()); // Outputs commit date + $commit.appendTo($container); + } + $('.commit:even').addClass('even'); + } + }); + }); +}); diff --git a/docs/source/_themes/finat/static/middlebg.png b/docs/source/_themes/finat/static/middlebg.png new file mode 100644 index 000000000..2369cfb7d Binary files /dev/null and b/docs/source/_themes/finat/static/middlebg.png differ diff --git a/docs/source/_themes/finat/static/neuton-fontfacekit/Apache License Version 2.txt b/docs/source/_themes/finat/static/neuton-fontfacekit/Apache License Version 2.txt new file mode 100755 index 000000000..4df74b8ce --- /dev/null +++ b/docs/source/_themes/finat/static/neuton-fontfacekit/Apache License Version 2.txt @@ -0,0 +1,53 @@ +Apache License + +Version 2.0, January 2004 + +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of this License; and + +You must cause any modified files to carry prominent notices stating that You changed the files; and + +You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + +If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. \ No newline at end of file diff --git a/docs/source/_themes/finat/static/neuton-fontfacekit/Neuton-webfont.eot b/docs/source/_themes/finat/static/neuton-fontfacekit/Neuton-webfont.eot new file mode 100755 index 000000000..b964bbe02 Binary files /dev/null and b/docs/source/_themes/finat/static/neuton-fontfacekit/Neuton-webfont.eot differ diff --git a/docs/source/_themes/finat/static/neuton-fontfacekit/Neuton-webfont.svg b/docs/source/_themes/finat/static/neuton-fontfacekit/Neuton-webfont.svg new file mode 100755 index 000000000..e93e27383 --- /dev/null +++ b/docs/source/_themes/finat/static/neuton-fontfacekit/Neuton-webfont.svg @@ -0,0 +1,145 @@ + + + + +This is a custom SVG webfont generated by Font Squirrel. 
+Copyright : Copyright c 2010 Brian Zick http21326infowith Reserved Font Name NeutonLicensed under the Apache License Version 20 the Licenseyou may not use this file except in compliance with the LicenseYou may obtain a copy of the License at httpwwwapacheorglicensesLICENSE20Unless required by applicable law or agreed to in writing softwaredistributed under the License is distributed on an AS IS BASISWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND either express or impliedSee the License for the specific language governing permissions andlimitations under the License + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/source/_themes/finat/static/neuton-fontfacekit/Neuton-webfont.ttf b/docs/source/_themes/finat/static/neuton-fontfacekit/Neuton-webfont.ttf new file mode 100755 index 000000000..c34819496 Binary files /dev/null and b/docs/source/_themes/finat/static/neuton-fontfacekit/Neuton-webfont.ttf differ diff --git a/docs/source/_themes/finat/static/neuton-fontfacekit/Neuton-webfont.woff b/docs/source/_themes/finat/static/neuton-fontfacekit/Neuton-webfont.woff new file mode 100755 index 000000000..d3d61a7c4 Binary files /dev/null and b/docs/source/_themes/finat/static/neuton-fontfacekit/Neuton-webfont.woff differ diff --git a/docs/source/_themes/finat/static/neuton-fontfacekit/demo.html b/docs/source/_themes/finat/static/neuton-fontfacekit/demo.html new file mode 100755 index 000000000..b207dc284 --- /dev/null +++ b/docs/source/_themes/finat/static/neuton-fontfacekit/demo.html @@ -0,0 +1,33 @@ + + + + + + + Font Face Demo + + + + + +
+

Font-face Demo for the Neuton Font

+ + + +

Neuton Regular - Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.

+ +
+ + diff --git a/docs/source/_themes/finat/static/neuton-fontfacekit/stylesheet.css b/docs/source/_themes/finat/static/neuton-fontfacekit/stylesheet.css new file mode 100755 index 000000000..ef4859e68 --- /dev/null +++ b/docs/source/_themes/finat/static/neuton-fontfacekit/stylesheet.css @@ -0,0 +1,16 @@ +/* Generated by Font Squirrel (http://www.fontsquirrel.com) on June 12, 2011 05:33:46 AM America/New_York */ + + + +@font-face { + font-family: 'NeutonRegular'; + src: url('Neuton-webfont.eot'); + src: url('Neuton-webfont.eot?#iefix') format('embedded-opentype'), + url('Neuton-webfont.woff') format('woff'), + url('Neuton-webfont.ttf') format('truetype'), + url('Neuton-webfont.svg#NeutonRegular') format('svg'); + font-weight: normal; + font-style: normal; + +} + diff --git a/docs/source/_themes/finat/static/nobile-fontfacekit/SIL Open Font License 1.1.txt b/docs/source/_themes/finat/static/nobile-fontfacekit/SIL Open Font License 1.1.txt new file mode 100755 index 000000000..e4b0c4ff5 --- /dev/null +++ b/docs/source/_themes/finat/static/nobile-fontfacekit/SIL Open Font License 1.1.txt @@ -0,0 +1,91 @@ +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +http://scripts.sil.org/OFL + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. 
+ +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. \ No newline at end of file diff --git a/docs/source/_themes/finat/static/nobile-fontfacekit/demo.html b/docs/source/_themes/finat/static/nobile-fontfacekit/demo.html new file mode 100755 index 000000000..983298770 --- /dev/null +++ b/docs/source/_themes/finat/static/nobile-fontfacekit/demo.html @@ -0,0 +1,48 @@ + + + + + + + Font Face Demo + + + + + +
+

Font-face Demo for the Nobile Font

+ + + +

Nobile Regular - Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.

+ + + +

Nobile Italic - Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.

+ + + +

Nobile Bold - Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.

+ + + +

Nobile Bold Italic - Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.

+ +
+ + diff --git a/docs/source/_themes/finat/static/nobile-fontfacekit/nobile-webfont.eot b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile-webfont.eot new file mode 100755 index 000000000..4c06b605e Binary files /dev/null and b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile-webfont.eot differ diff --git a/docs/source/_themes/finat/static/nobile-fontfacekit/nobile-webfont.svg b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile-webfont.svg new file mode 100755 index 000000000..2ef6265ad --- /dev/null +++ b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile-webfont.svg @@ -0,0 +1,149 @@ + + + + +This is a custom SVG webfont generated by Font Squirrel. +Copyright : Copyright c 20072010 by vernon adams All rights reserved +Foundry : vernon adams + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/source/_themes/finat/static/nobile-fontfacekit/nobile-webfont.ttf b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile-webfont.ttf new file mode 100755 index 000000000..1a607e95a Binary files /dev/null and b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile-webfont.ttf differ diff --git a/docs/source/_themes/finat/static/nobile-fontfacekit/nobile-webfont.woff b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile-webfont.woff new file mode 100755 index 000000000..cf6959b4d Binary files /dev/null and b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile-webfont.woff differ diff --git a/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold-webfont.eot b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold-webfont.eot new file mode 100755 index 000000000..3e442e04c Binary files /dev/null and b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold-webfont.eot differ diff --git a/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold-webfont.svg b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold-webfont.svg new file mode 100755 index 000000000..cf5bab40c --- /dev/null +++ b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold-webfont.svg @@ -0,0 +1,148 @@ + + + + +This is a custom SVG webfont generated by Font Squirrel. 
+Copyright : Copyright c 200710 by vernon adams All rights reserved + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold-webfont.ttf b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold-webfont.ttf new file mode 100755 index 000000000..07aa902a4 Binary files /dev/null and b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold-webfont.ttf differ diff --git a/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold-webfont.woff b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold-webfont.woff new file mode 100755 index 000000000..3324f0244 Binary files /dev/null and b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold-webfont.woff differ diff --git a/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold_italic-webfont.eot b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold_italic-webfont.eot new file mode 100755 index 000000000..f05b579dd Binary files /dev/null and b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold_italic-webfont.eot differ diff --git a/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold_italic-webfont.svg b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold_italic-webfont.svg new file mode 100755 index 000000000..26f5440eb --- /dev/null +++ b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold_italic-webfont.svg @@ -0,0 +1,148 @@ + + + + +This is a custom SVG webfont generated by Font Squirrel. 
+Copyright : Copyright c 2007 by vernon adams All rights reserved + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold_italic-webfont.ttf b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold_italic-webfont.ttf new file mode 100755 index 000000000..54dedd83b Binary files /dev/null and b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold_italic-webfont.ttf differ diff --git a/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold_italic-webfont.woff b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold_italic-webfont.woff new file mode 100755 index 000000000..d88da7512 Binary files /dev/null and b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_bold_italic-webfont.woff differ diff --git a/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_italic-webfont.eot b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_italic-webfont.eot new file mode 100755 index 000000000..c8f700921 Binary files /dev/null and b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_italic-webfont.eot differ diff --git a/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_italic-webfont.svg b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_italic-webfont.svg new file mode 100755 index 000000000..6d6baa3cf --- /dev/null +++ b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_italic-webfont.svg @@ -0,0 +1,148 @@ + + + + +This is a custom SVG webfont generated by Font Squirrel. 
+Copyright : Copyright c 20072010 by vernon adams All rights reserved + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_italic-webfont.ttf b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_italic-webfont.ttf new file mode 100755 index 000000000..ff56b0e0a Binary files /dev/null and b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_italic-webfont.ttf differ diff --git a/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_italic-webfont.woff b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_italic-webfont.woff new file mode 100755 index 000000000..af1582bbf Binary files /dev/null and b/docs/source/_themes/finat/static/nobile-fontfacekit/nobile_italic-webfont.woff differ diff --git a/docs/source/_themes/finat/static/nobile-fontfacekit/stylesheet.css b/docs/source/_themes/finat/static/nobile-fontfacekit/stylesheet.css new file mode 100755 index 000000000..10d0a7c52 --- /dev/null +++ b/docs/source/_themes/finat/static/nobile-fontfacekit/stylesheet.css @@ -0,0 +1,52 @@ +/* Generated by Font Squirrel (http://www.fontsquirrel.com) on June 12, 2011 05:30:25 AM America/New_York */ + + + +@font-face { + font-family: 'NobileRegular'; + src: url('nobile-webfont.eot'); + src: url('nobile-webfont.eot?#iefix') format('embedded-opentype'), + url('nobile-webfont.woff') format('woff'), + url('nobile-webfont.ttf') format('truetype'), + url('nobile-webfont.svg#NobileRegular') format('svg'); + font-weight: normal; + font-style: normal; + +} + +@font-face { + font-family: 'NobileItalic'; + src: url('nobile_italic-webfont.eot'); + src: url('nobile_italic-webfont.eot?#iefix') format('embedded-opentype'), + url('nobile_italic-webfont.woff') format('woff'), + url('nobile_italic-webfont.ttf') format('truetype'), + url('nobile_italic-webfont.svg#NobileItalic') format('svg'); + font-weight: normal; + font-style: normal; + +} + +@font-face { + font-family: 'NobileBold'; + src: url('nobile_bold-webfont.eot'); + src: url('nobile_bold-webfont.eot?#iefix') format('embedded-opentype'), + url('nobile_bold-webfont.woff') format('woff'), + url('nobile_bold-webfont.ttf') format('truetype'), + url('nobile_bold-webfont.svg#NobileBold') format('svg'); + font-weight: normal; + font-style: normal; + +} + +@font-face { + font-family: 'NobileBoldItalic'; + src: url('nobile_bold_italic-webfont.eot'); + src: url('nobile_bold_italic-webfont.eot?#iefix') format('embedded-opentype'), + url('nobile_bold_italic-webfont.woff') format('woff'), + url('nobile_bold_italic-webfont.ttf') format('truetype'), + url('nobile_bold_italic-webfont.svg#NobileBoldItalic') format('svg'); + font-weight: normal; + font-style: normal; + +} + diff --git a/docs/source/_themes/finat/static/sample-news-image.png b/docs/source/_themes/finat/static/sample-news-image.png new file mode 100644 index 000000000..0534438d3 Binary files /dev/null and b/docs/source/_themes/finat/static/sample-news-image.png differ diff --git a/docs/source/_themes/finat/static/social-buttons.html b/docs/source/_themes/finat/static/social-buttons.html new file mode 100644 index 000000000..c2cdef39e --- /dev/null +++ b/docs/source/_themes/finat/static/social-buttons.html @@ -0,0 +1,44 @@ + + + + + + A test of social buttons + + 
Let’s be social!
Please test these buttons by logging into your social profiles.
+ + diff --git a/docs/source/_themes/finat/static/transparent.gif b/docs/source/_themes/finat/static/transparent.gif new file mode 100644 index 000000000..0341802e5 Binary files /dev/null and b/docs/source/_themes/finat/static/transparent.gif differ diff --git a/docs/source/_themes/finat/static/unknown.png b/docs/source/_themes/finat/static/unknown.png new file mode 100644 index 000000000..f97413690 Binary files /dev/null and b/docs/source/_themes/finat/static/unknown.png differ diff --git a/docs/source/_themes/finat/theme.conf b/docs/source/_themes/finat/theme.conf new file mode 100644 index 000000000..6bc0468d3 --- /dev/null +++ b/docs/source/_themes/finat/theme.conf @@ -0,0 +1,10 @@ +[theme] +inherit = basic +stylesheet = fenics.css +pygments_style = fenics_theme_support.FenicsStyle + +[options] +highlight_language = guess +short_title = Home +logo = icon.png +favicon = icon.ico diff --git a/doc/sphinx/source/conf.py b/docs/source/conf.py similarity index 90% rename from doc/sphinx/source/conf.py rename to docs/source/conf.py index f97faaf30..9963d35cd 100644 --- a/doc/sphinx/source/conf.py +++ b/docs/source/conf.py @@ -128,11 +128,6 @@ # pixels large. #html_favicon = None -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. @@ -268,22 +263,3 @@ 'numpy': ('https://numpy.org/doc/stable/', None), 'python': ('https://docs.python.org/3/', None), } - - -def run_apidoc(_): - modules = ['FIAT'] - - # Get location of Sphinx files - sphinx_source_dir = os.path.abspath(os.path.dirname(__file__)) - repo_dir = os.path.abspath(os.path.join(sphinx_source_dir, os.path.pardir, - os.path.pardir, os.path.pardir)) - apidoc_dir = os.path.join(sphinx_source_dir, "api-doc") - - from sphinx.ext.apidoc import main - for module in modules: - # Generate .rst files ready for autodoc - module_dir = os.path.join(repo_dir, module) - main(["-f", "-d", "1", "-o", apidoc_dir, module_dir]) - -def setup(app): - app.connect('builder-inited', run_apidoc) diff --git a/doc/sphinx/source/index.rst b/docs/source/index.rst similarity index 84% rename from doc/sphinx/source/index.rst rename to docs/source/index.rst index 9dc306dc2..1deda454c 100644 --- a/doc/sphinx/source/index.rst +++ b/docs/source/index.rst @@ -28,10 +28,7 @@ Documentation installation manual - API reference releases - -[FIXME: These links don't belong here, should go under API reference somehow.] 
- -* :ref:`genindex` -* :ref:`modindex` + FIAT + finat + gem diff --git a/doc/sphinx/source/installation.rst b/docs/source/installation.rst similarity index 100% rename from doc/sphinx/source/installation.rst rename to docs/source/installation.rst diff --git a/doc/sphinx/source/manual.rst b/docs/source/manual.rst similarity index 100% rename from doc/sphinx/source/manual.rst rename to docs/source/manual.rst diff --git a/doc/sphinx/source/releases.rst b/docs/source/releases.rst similarity index 100% rename from doc/sphinx/source/releases.rst rename to docs/source/releases.rst diff --git a/doc/sphinx/source/releases/next.rst b/docs/source/releases/next.rst similarity index 100% rename from doc/sphinx/source/releases/next.rst rename to docs/source/releases/next.rst diff --git a/doc/sphinx/source/releases/v1.6.0.rst b/docs/source/releases/v1.6.0.rst similarity index 100% rename from doc/sphinx/source/releases/v1.6.0.rst rename to docs/source/releases/v1.6.0.rst diff --git a/doc/sphinx/source/releases/v2016.1.0.rst b/docs/source/releases/v2016.1.0.rst similarity index 100% rename from doc/sphinx/source/releases/v2016.1.0.rst rename to docs/source/releases/v2016.1.0.rst diff --git a/doc/sphinx/source/releases/v2016.2.0.rst b/docs/source/releases/v2016.2.0.rst similarity index 100% rename from doc/sphinx/source/releases/v2016.2.0.rst rename to docs/source/releases/v2016.2.0.rst diff --git a/doc/sphinx/source/releases/v2017.1.0.post1.rst b/docs/source/releases/v2017.1.0.post1.rst similarity index 100% rename from doc/sphinx/source/releases/v2017.1.0.post1.rst rename to docs/source/releases/v2017.1.0.post1.rst diff --git a/doc/sphinx/source/releases/v2017.1.0.rst b/docs/source/releases/v2017.1.0.rst similarity index 100% rename from doc/sphinx/source/releases/v2017.1.0.rst rename to docs/source/releases/v2017.1.0.rst diff --git a/doc/sphinx/source/releases/v2017.2.0.rst b/docs/source/releases/v2017.2.0.rst similarity index 100% rename from doc/sphinx/source/releases/v2017.2.0.rst rename to docs/source/releases/v2017.2.0.rst diff --git a/doc/sphinx/source/releases/v2018.1.0.rst b/docs/source/releases/v2018.1.0.rst similarity index 100% rename from doc/sphinx/source/releases/v2018.1.0.rst rename to docs/source/releases/v2018.1.0.rst diff --git a/doc/sphinx/source/releases/v2019.1.0.rst b/docs/source/releases/v2019.1.0.rst similarity index 100% rename from doc/sphinx/source/releases/v2019.1.0.rst rename to docs/source/releases/v2019.1.0.rst diff --git a/finat/__init__.py b/finat/__init__.py new file mode 100644 index 000000000..cad018212 --- /dev/null +++ b/finat/__init__.py @@ -0,0 +1,48 @@ +from .fiat_elements import Bernstein # noqa: F401 +from .fiat_elements import Bubble, CrouzeixRaviart, DiscontinuousTaylor # noqa: F401 +from .fiat_elements import Lagrange, DiscontinuousLagrange, Real # noqa: F401 +from .fiat_elements import DPC, Serendipity, BrezziDouglasMariniCubeEdge, BrezziDouglasMariniCubeFace # noqa: F401 +from .fiat_elements import TrimmedSerendipityFace, TrimmedSerendipityEdge # noqa: F401 +from .fiat_elements import TrimmedSerendipityDiv # noqa: F401 +from .fiat_elements import TrimmedSerendipityCurl # noqa: F401 +from .fiat_elements import BrezziDouglasMarini, BrezziDouglasFortinMarini # noqa: F401 +from .fiat_elements import Nedelec, NedelecSecondKind, RaviartThomas # noqa: F401 +from .fiat_elements import HellanHerrmannJohnson, Regge # noqa: F401 +from .fiat_elements import GopalakrishnanLedererSchoberlFirstKind # noqa: F401 +from .fiat_elements import 
GopalakrishnanLedererSchoberlSecondKind # noqa: F401 +from .fiat_elements import FacetBubble # noqa: F401 +from .fiat_elements import KongMulderVeldhuizen # noqa: F401 + +from .argyris import Argyris # noqa: F401 +from .aw import ArnoldWinther # noqa: F401 +from .aw import ArnoldWintherNC # noqa: F401 +from .hz import HuZhang # noqa: F401 +from .bell import Bell # noqa: F401 +from .bernardi_raugel import BernardiRaugel, BernardiRaugelBubble # noqa: F401 +from .hct import HsiehCloughTocher, ReducedHsiehCloughTocher # noqa: F401 +from .arnold_qin import ArnoldQin, ReducedArnoldQin # noqa: F401 +from .christiansen_hu import ChristiansenHu # noqa: F401 +from .alfeld_sorokina import AlfeldSorokina # noqa: F401 +from .guzman_neilan import GuzmanNeilanFirstKindH1, GuzmanNeilanSecondKindH1, GuzmanNeilanBubble, GuzmanNeilanH1div # noqa: F401 +from .powell_sabin import QuadraticPowellSabin6, QuadraticPowellSabin12 # noqa: F401 +from .hermite import Hermite # noqa: F401 +from .johnson_mercier import JohnsonMercier # noqa: F401 +from .mtw import MardalTaiWinther # noqa: F401 +from .morley import Morley # noqa: F401 +from .trace import HDivTrace # noqa: F401 +from .direct_serendipity import DirectSerendipity # noqa: F401 + +from .spectral import GaussLobattoLegendre, GaussLegendre, Legendre, IntegratedLegendre, FDMLagrange, FDMQuadrature, FDMDiscontinuousLagrange, FDMBrokenH1, FDMBrokenL2, FDMHermite # noqa: F401 +from .tensorfiniteelement import TensorFiniteElement # noqa: F401 +from .tensor_product import TensorProductElement # noqa: F401 +from .cube import FlattenedDimensions # noqa: F401 +from .discontinuous import DiscontinuousElement # noqa: F401 +from .enriched import EnrichedElement # noqa: F401 +from .hdivcurl import HCurlElement, HDivElement # noqa: F401 +from .mixed import MixedElement # noqa: F401 +from .nodal_enriched import NodalEnrichedElement # noqa: 401 +from .quadrature_element import QuadratureElement, make_quadrature_element # noqa: F401 +from .restricted import RestrictedElement # noqa: F401 +from .runtime_tabulated import RuntimeTabulated # noqa: F401 +from . import quadrature # noqa: F401 +from . 
import cell_tools # noqa: F401 diff --git a/finat/alfeld_sorokina.py b/finat/alfeld_sorokina.py new file mode 100644 index 000000000..8e1275b38 --- /dev/null +++ b/finat/alfeld_sorokina.py @@ -0,0 +1,41 @@ +import FIAT +import numpy +from gem import ListTensor + +from finat.fiat_elements import FiatElement +from finat.physically_mapped import Citations, identity, PhysicallyMappedElement +from finat.piola_mapped import piola_inverse + + +class AlfeldSorokina(PhysicallyMappedElement, FiatElement): + def __init__(self, cell, degree=2): + if Citations is not None: + Citations().register("AlfeldSorokina2016") + super().__init__(FIAT.AlfeldSorokina(cell, degree)) + + def basis_transformation(self, coordinate_mapping): + sd = self.cell.get_spatial_dimension() + bary, = self.cell.make_points(sd, 0, sd+1) + J = coordinate_mapping.jacobian_at(bary) + detJ = coordinate_mapping.detJ_at(bary) + + dofs = self.entity_dofs() + V = identity(self.space_dimension()) + + # Undo the Piola transform + nodes = self._element.get_dual_set().nodes + Finv = piola_inverse(self.cell, J, detJ) + for dim in sorted(dofs): + for e in sorted(dofs[dim]): + k = 0 + while k < len(dofs[dim][e]): + cur = dofs[dim][e][k] + if len(nodes[cur].deriv_dict) > 0: + V[cur, cur] = detJ + k += 1 + else: + s = dofs[dim][e][k:k+sd] + V[numpy.ix_(s, s)] = Finv + k += sd + + return ListTensor(V.T) diff --git a/finat/argyris.py b/finat/argyris.py new file mode 100644 index 000000000..562ae41b2 --- /dev/null +++ b/finat/argyris.py @@ -0,0 +1,169 @@ +import numpy +from math import comb + +import FIAT + +from gem import Literal, ListTensor + +from finat.fiat_elements import ScalarFiatElement +from finat.physically_mapped import Citations, identity, PhysicallyMappedElement + + +def _vertex_transform(V, vorder, fiat_cell, coordinate_mapping): + """Basis transformation for evaluation, gradient, and hessian at vertices.""" + sd = fiat_cell.get_spatial_dimension() + top = fiat_cell.get_topology() + bary, = fiat_cell.make_points(sd, 0, sd+1) + J = coordinate_mapping.jacobian_at(bary) + + gdofs = sd + G = [[J[j, i] for j in range(sd)] for i in range(sd)] + + if vorder < 2: + hdofs = 0 + H = [[]] + else: + hdofs = (sd*(sd+1))//2 + indices = [(i, j) for i in range(sd) for j in range(i, sd)] + H = numpy.zeros((hdofs, hdofs), dtype=object) + for p, (i, j) in enumerate(indices): + for q, (m, n) in enumerate(indices): + H[p, q] = J[m, i] * J[n, j] + J[m, j] * J[n, i] + H[:, [i == j for i, j in indices]] *= 0.5 + + s = 0 + for v in sorted(top[0]): + s += 1 + V[s:s+gdofs, s:s+gdofs] = G + s += gdofs + V[s:s+hdofs, s:s+hdofs] = H + s += hdofs + return V + + +def _normal_tangential_transform(fiat_cell, J, detJ, f): + R = numpy.array([[0, 1], [-1, 0]]) + that = fiat_cell.compute_edge_tangent(f) + nhat = R @ that + Jn = J @ Literal(nhat) + Jt = J @ Literal(that) + alpha = Jn @ Jt + beta = Jt @ Jt + Bnn = detJ / beta + Bnt = alpha / beta + + Lhat = numpy.linalg.norm(that) + Bnn = Bnn * Lhat + Bnt = Bnt / Lhat + return Bnn, Bnt, Jt + + +def _edge_transform(V, vorder, eorder, fiat_cell, coordinate_mapping, avg=False): + """Basis transformation for integral edge moments. + + :arg V: the transpose of the basis transformation. + :arg vorder: the jet order at vertices, matching the Jacobi weights in the + normal derivative moments on edges. + :arg eorder: the order of the normal derivative moments. + :arg fiat_cell: the reference triangle. + :arg coordinate_mapping: the coordinate mapping. + :kwarg avg: are we scaling integrals by dividing by the edge length? 
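+    The transformation is written into ``V`` in place (only the rows of the
+    edge normal-moment DOFs are touched); nothing is returned.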
+ """ + sd = fiat_cell.get_spatial_dimension() + bary, = fiat_cell.make_points(sd, 0, sd+1) + J = coordinate_mapping.jacobian_at(bary) + detJ = coordinate_mapping.detJ_at(bary) + pel = coordinate_mapping.physical_edge_lengths() + + # number of DOFs per vertex/edge + voffset = comb(sd + vorder, vorder) + eoffset = 2 * eorder + 1 + top = fiat_cell.get_topology() + for e in sorted(top[1]): + Bnn, Bnt, Jt = _normal_tangential_transform(fiat_cell, J, detJ, e) + if avg: + Bnn = Bnn * pel[e] + + v0id, v1id = (v * voffset for v in top[1][e]) + s0 = len(top[0]) * voffset + e * eoffset + for k in range(eorder+1): + s = s0 + k + # Jacobi polynomial at the endpoints + P1 = comb(k + vorder, k) + P0 = -(-1)**k * P1 + V[s, s] = Bnn + V[s, v1id] = P1 * Bnt + V[s, v0id] = P0 * Bnt + if k > 0: + V[s, s + eorder] = -1 * Bnt + + +class Argyris(PhysicallyMappedElement, ScalarFiatElement): + def __init__(self, cell, degree=5, variant=None, avg=False): + if Citations is not None: + Citations().register("Argyris1968") + if variant is None: + variant = "integral" + if variant == "point" and degree != 5: + raise NotImplementedError("Degree must be 5 for 'point' variant of Argyris") + fiat_element = FIAT.Argyris(cell, degree, variant=variant) + self.variant = variant + self.avg = avg + super().__init__(fiat_element) + + def basis_transformation(self, coordinate_mapping): + sd = self.cell.get_spatial_dimension() + top = self.cell.get_topology() + + V = identity(self.space_dimension()) + + vorder = 2 + voffset = comb(sd + vorder, vorder) + eorder = self.degree - 5 + + _vertex_transform(V, vorder, self.cell, coordinate_mapping) + if self.variant == "integral": + _edge_transform(V, vorder, eorder, self.cell, coordinate_mapping, avg=self.avg) + else: + bary, = self.cell.make_points(sd, 0, sd+1) + J = coordinate_mapping.jacobian_at(bary) + detJ = coordinate_mapping.detJ_at(bary) + pel = coordinate_mapping.physical_edge_lengths() + for e in sorted(top[1]): + s = len(top[0]) * voffset + e * (eorder+1) + v0id, v1id = (v * voffset for v in top[1][e]) + Bnn, Bnt, Jt = _normal_tangential_transform(self.cell, J, detJ, e) + + # edge midpoint normal derivative + V[s, s] = Bnn * pel[e] + + # vertex points + V[s, v1id] = 15/8 * Bnt + V[s, v0id] = -1 * V[s, v1id] + + # vertex derivatives + for i in range(sd): + V[s, v1id+1+i] = -7/16 * Bnt * Jt[i] + V[s, v0id+1+i] = V[s, v1id+1+i] + + # second derivatives + tau = [Jt[0]*Jt[0], 2*Jt[0]*Jt[1], Jt[1]*Jt[1]] + for i in range(len(tau)): + V[s, v1id+3+i] = 1/32 * Bnt * tau[i] + V[s, v0id+3+i] = -1 * V[s, v1id+3+i] + + # Patch up conditioning + h = coordinate_mapping.cell_size() + for v in sorted(top[0]): + s = voffset*v + 1 + V[:, s:s+sd] *= 1 / h[v] + V[:, s+sd:voffset*(v+1)] *= 1 / (h[v]*h[v]) + + if self.variant == "point": + eoffset = 2 * eorder + 1 + for e in sorted(top[1]): + v0, v1 = top[1][e] + s = len(top[0]) * voffset + e * eoffset + V[:, s:s+eorder+1] *= 2 / (h[v0] + h[v1]) + + return ListTensor(V.T) diff --git a/finat/arnold_qin.py b/finat/arnold_qin.py new file mode 100644 index 000000000..2d84eea1a --- /dev/null +++ b/finat/arnold_qin.py @@ -0,0 +1,19 @@ +import FIAT + +from finat.physically_mapped import Citations +from finat.fiat_elements import FiatElement +from finat.piola_mapped import PiolaBubbleElement + + +class ArnoldQin(FiatElement): + def __init__(self, cell, degree=2): + if Citations is not None: + Citations().register("ArnoldQin1992") + super().__init__(FIAT.ArnoldQin(cell, degree)) + + +class ReducedArnoldQin(PiolaBubbleElement): + def __init__(self, cell, 
degree=2): + if Citations is not None: + Citations().register("ArnoldQin1992") + super().__init__(FIAT.ArnoldQin(cell, degree, reduced=True)) diff --git a/finat/aw.py b/finat/aw.py new file mode 100644 index 000000000..80d7b7ea1 --- /dev/null +++ b/finat/aw.py @@ -0,0 +1,137 @@ +"""Implementation of the Arnold-Winther finite elements.""" +import FIAT +import numpy +from gem import ListTensor + +from finat.fiat_elements import FiatElement +from finat.physically_mapped import Citations, identity, PhysicallyMappedElement +from finat.piola_mapped import adjugate, normal_tangential_edge_transform, normal_tangential_face_transform + + +def _facet_transform(fiat_cell, facet_moment_degree, coordinate_mapping): + sd = fiat_cell.get_spatial_dimension() + top = fiat_cell.get_topology() + num_facets = len(top[sd-1]) + dimPk_facet = FIAT.expansions.polynomial_dimension( + fiat_cell.construct_subelement(sd-1), facet_moment_degree) + dofs_per_facet = sd * dimPk_facet + ndofs = num_facets * dofs_per_facet + V = identity(ndofs) + + bary, = fiat_cell.make_points(sd, 0, sd+1) + J = coordinate_mapping.jacobian_at(bary) + detJ = coordinate_mapping.detJ_at(bary) + if sd == 2: + transform = normal_tangential_edge_transform + elif sd == 3: + transform = normal_tangential_face_transform + + for f in range(num_facets): + rows = transform(fiat_cell, J, detJ, f) + for i in range(dimPk_facet): + s = dofs_per_facet*f + i * sd + V[s+1:s+sd, s:s+sd] = rows + return V + + +def _evaluation_transform(fiat_cell, coordinate_mapping): + sd = fiat_cell.get_spatial_dimension() + bary, = fiat_cell.make_points(sd, 0, sd+1) + J = coordinate_mapping.jacobian_at(bary) + K = adjugate([[J[i, j] for j in range(sd)] for i in range(sd)]) + + indices = [(i, j) for i in range(sd) for j in range(i, sd)] + ncomp = len(indices) + W = numpy.zeros((ncomp, ncomp), dtype=object) + for p, (i, j) in enumerate(indices): + for q, (m, n) in enumerate(indices): + W[p, q] = 0.5*(K[i, m] * K[j, n] + K[j, m] * K[i, n]) + W[:, [i != j for i, j in indices]] *= 2 + return W + + +class ArnoldWintherNC(PhysicallyMappedElement, FiatElement): + def __init__(self, cell, degree=2): + if Citations is not None: + Citations().register("Arnold2003") + super().__init__(FIAT.ArnoldWintherNC(cell, degree)) + + def basis_transformation(self, coordinate_mapping): + """Note, the extra 3 dofs which are removed here + correspond to the constraints.""" + numbf = self._element.space_dimension() + ndof = self.space_dimension() + V = identity(numbf, ndof) + + V[:12, :12] = _facet_transform(self.cell, 1, coordinate_mapping) + + # Note: that the edge DOFs are scaled by edge lengths in FIAT implies + # that they are already have the necessary rescaling to improve + # conditioning. 
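+        # As in argyris.py, V is assembled as the transpose of the transformation,
+        # hence the V.T in the returned ListTensor below.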
+ + return ListTensor(V.T) + + def entity_dofs(self): + return {0: {0: [], + 1: [], + 2: []}, + 1: {0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8, 9, 10, 11]}, + 2: {0: [12, 13, 14]}} + + @property + def index_shape(self): + return (self.space_dimension(),) + + def space_dimension(self): + return 15 + + +class ArnoldWinther(PhysicallyMappedElement, FiatElement): + def __init__(self, cell, degree=3): + if Citations is not None: + Citations().register("Arnold2002") + super().__init__(FIAT.ArnoldWinther(cell, degree)) + + def basis_transformation(self, coordinate_mapping): + # The extra 6 dofs removed here correspond to the constraints + numbf = self._element.space_dimension() + ndof = self.space_dimension() + V = identity(numbf, ndof) + + sd = self.cell.get_spatial_dimension() + W = _evaluation_transform(self.cell, coordinate_mapping) + ncomp = W.shape[0] + + # Put into the right rows and columns. + V[0:3, 0:3] = V[3:6, 3:6] = V[6:9, 6:9] = W + num_verts = sd + 1 + cur = num_verts * ncomp + + Vsub = _facet_transform(self.cell, 1, coordinate_mapping) + fdofs = Vsub.shape[0] + V[cur:cur+fdofs, cur:cur+fdofs] = Vsub + cur += fdofs + + # RESCALING FOR CONDITIONING + h = coordinate_mapping.cell_size() + for e in range(num_verts): + V[:, ncomp*e:ncomp*(e+1)] *= 1/(h[e] * h[e]) + + # Note: that the edge DOFs are scaled by edge lengths in FIAT implies + # that they are already have the necessary rescaling to improve + # conditioning. + return ListTensor(V.T) + + def entity_dofs(self): + return {0: {0: [0, 1, 2], + 1: [3, 4, 5], + 2: [6, 7, 8]}, + 1: {0: [9, 10, 11, 12], 1: [13, 14, 15, 16], 2: [17, 18, 19, 20]}, + 2: {0: [21, 22, 23]}} + + @property + def index_shape(self): + return (self.space_dimension(),) + + def space_dimension(self): + return 24 diff --git a/finat/bell.py b/finat/bell.py new file mode 100644 index 000000000..ce7d0002d --- /dev/null +++ b/finat/bell.py @@ -0,0 +1,81 @@ +import FIAT +from math import comb +from gem import ListTensor + +from finat.fiat_elements import ScalarFiatElement +from finat.physically_mapped import Citations, identity, PhysicallyMappedElement +from finat.argyris import _vertex_transform, _normal_tangential_transform +from copy import deepcopy + + +class Bell(PhysicallyMappedElement, ScalarFiatElement): + def __init__(self, cell, degree=5): + if Citations is not None: + Citations().register("Bell1969") + super().__init__(FIAT.Bell(cell)) + + reduced_dofs = deepcopy(self._element.entity_dofs()) + sd = cell.get_spatial_dimension() + for entity in reduced_dofs[sd-1]: + reduced_dofs[sd-1][entity] = [] + self._entity_dofs = reduced_dofs + + def basis_transformation(self, coordinate_mapping): + # Jacobian at barycenter + sd = self.cell.get_spatial_dimension() + top = self.cell.get_topology() + bary, = self.cell.make_points(sd, 0, sd+1) + J = coordinate_mapping.jacobian_at(bary) + detJ = coordinate_mapping.detJ_at(bary) + + numbf = self._element.space_dimension() + ndof = self.space_dimension() + # rectangular to toss out the constraint dofs + V = identity(numbf, ndof) + + vorder = 2 + _vertex_transform(V, vorder, self.cell, coordinate_mapping) + + voffset = comb(sd + vorder, vorder) + for e in sorted(top[1]): + s = len(top[0]) * voffset + e + v0id, v1id = (v * voffset for v in top[1][e]) + Bnn, Bnt, Jt = _normal_tangential_transform(self.cell, J, detJ, e) + + # vertex points + V[s, v1id] = 1/21 * Bnt + V[s, v0id] = -1 * V[s, v1id] + + # vertex derivatives + for i in range(sd): + V[s, v1id+1+i] = -1/42 * Bnt * Jt[i] + V[s, v0id+1+i] = V[s, v1id+1+i] + + # second 
derivatives + tau = [Jt[0]*Jt[0], 2*Jt[0]*Jt[1], Jt[1]*Jt[1]] + for i in range(len(tau)): + V[s, v1id+3+i] = 1/252 * Bnt * tau[i] + V[s, v0id+3+i] = -1 * V[s, v1id+3+i] + + # Patch up conditioning + h = coordinate_mapping.cell_size() + for v in sorted(top[0]): + s = voffset * v + 1 + V[:, s:s+sd] *= 1/h[v] + V[:, s+sd:voffset*(v+1)] *= 1/(h[v]*h[v]) + + return ListTensor(V.T) + + # This wipes out the edge dofs. FIAT gives a 21 DOF element + # because we need some extra functions to help with transforming + # under the edge constraint. However, we only have an 18 DOF + # element. + def entity_dofs(self): + return self._entity_dofs + + @property + def index_shape(self): + return (18,) + + def space_dimension(self): + return 18 diff --git a/finat/bernardi_raugel.py b/finat/bernardi_raugel.py new file mode 100644 index 000000000..90969d961 --- /dev/null +++ b/finat/bernardi_raugel.py @@ -0,0 +1,16 @@ +import FIAT + +from finat.physically_mapped import Citations +from finat.piola_mapped import PiolaBubbleElement + + +class BernardiRaugel(PiolaBubbleElement): + def __init__(self, cell, order=1): + if Citations is not None: + Citations().register("BernardiRaugel1985") + super().__init__(FIAT.BernardiRaugel(cell, order=order)) + + +class BernardiRaugelBubble(BernardiRaugel): + def __init__(self, cell, degree=None): + super().__init__(cell, order=0) diff --git a/finat/cell_tools.py b/finat/cell_tools.py new file mode 100644 index 000000000..cedc8bc66 --- /dev/null +++ b/finat/cell_tools.py @@ -0,0 +1,5 @@ +"""Find the maximal complex in a list of cell complexes. +This is a pass-through from FIAT so that FInAT clients +(e.g. tsfc) don't have to directly import FIAT.""" + +from FIAT.reference_element import max_complex # noqa: F401 diff --git a/finat/christiansen_hu.py b/finat/christiansen_hu.py new file mode 100644 index 000000000..abdfb99f6 --- /dev/null +++ b/finat/christiansen_hu.py @@ -0,0 +1,11 @@ +import FIAT + +from finat.physically_mapped import Citations +from finat.piola_mapped import PiolaBubbleElement + + +class ChristiansenHu(PiolaBubbleElement): + def __init__(self, cell, degree=1): + if Citations is not None: + Citations().register("ChristiansenHu2019") + super().__init__(FIAT.ChristiansenHu(cell, degree)) diff --git a/finat/cube.py b/finat/cube.py new file mode 100644 index 000000000..39aeccd04 --- /dev/null +++ b/finat/cube.py @@ -0,0 +1,100 @@ +from __future__ import absolute_import, division, print_function + +from FIAT.reference_element import (UFCHexahedron, UFCQuadrilateral, + compute_unflattening_map, flatten_entities, + flatten_permutations) +from FIAT.tensor_product import FlattenedDimensions as FIAT_FlattenedDimensions +from gem.utils import cached_property + +from finat.finiteelementbase import FiniteElementBase + + +class FlattenedDimensions(FiniteElementBase): + """Class for elements on quadrilaterals and hexahedra. 
Wraps a tensor + product element on a tensor product cell, and flattens its entity + dimensions.""" + + def __init__(self, element): + super().__init__() + self.product = element + self._unflatten = compute_unflattening_map(element.cell.get_topology()) + + @cached_property + def cell(self): + dim = self.product.cell.get_spatial_dimension() + if dim == 2: + return UFCQuadrilateral() + elif dim == 3: + return UFCHexahedron() + else: + raise NotImplementedError("Cannot guess cell for spatial dimension %s" % dim) + + @property + def complex(self): + return self.product.complex + + @property + def degree(self): + unique_degree, = set(self.product.degree) + return unique_degree + + @property + def formdegree(self): + return self.product.formdegree + + @cached_property + def _entity_dofs(self): + return flatten_entities(self.product.entity_dofs()) + + @cached_property + def _entity_support_dofs(self): + return flatten_entities(self.product.entity_support_dofs()) + + def entity_dofs(self): + return self._entity_dofs + + @cached_property + def entity_permutations(self): + return flatten_permutations(self.product.entity_permutations) + + def space_dimension(self): + return self.product.space_dimension() + + @cached_property + def fiat_equivalent(self): + return FIAT_FlattenedDimensions(self.product.fiat_equivalent) + + def basis_evaluation(self, order, ps, entity=None, coordinate_mapping=None): + """Return code for evaluating the element at known points on the + reference element. + + :param order: return derivatives up to this order. + :param ps: the point set object. + :param entity: the cell entity on which to tabulate. + """ + if entity is None: + entity = (self.cell.get_spatial_dimension(), 0) + + return self.product.basis_evaluation(order, ps, self._unflatten[entity]) + + def point_evaluation(self, order, point, entity=None): + if entity is None: + entity = (self.cell.get_spatial_dimension(), 0) + + return self.product.point_evaluation(order, point, self._unflatten[entity]) + + @property + def dual_basis(self): + return self.product.dual_basis + + @property + def index_shape(self): + return self.product.index_shape + + @property + def value_shape(self): + return self.product.value_shape + + @property + def mapping(self): + return self.product.mapping diff --git a/finat/direct_serendipity.py b/finat/direct_serendipity.py new file mode 100644 index 000000000..fd084a4d0 --- /dev/null +++ b/finat/direct_serendipity.py @@ -0,0 +1,490 @@ +from itertools import chain, repeat + +import gem +import numpy +import sympy +try: + import symengine + symbolics = symengine +except ImportError: + symbolics = sympy +from FIAT.polynomial_set import mis +from FIAT.reference_element import UFCQuadrilateral +from gem.utils import cached_property + +from finat.finiteelementbase import FiniteElementBase +from finat.physically_mapped import Citations, DirectlyDefinedElement +from finat.sympy2gem import sympy2gem + + +class DirectSerendipity(DirectlyDefinedElement, FiniteElementBase): + def __init__(self, cell, degree): + if Citations is not None: + Citations().register("Arbogast2017") + + # These elements only known currently on quads + assert isinstance(cell, UFCQuadrilateral) + + self._cell = cell + self._degree = degree + self._deriv_cache = {} + + @property + def cell(self): + return self._cell + + @property + def complex(self): + return self._cell + + @property + def degree(self): + return self._degree + + @property + def formdegree(self): + return 0 + + def entity_dofs(self): + if self.degree == 1: + return {0: 
{i: [i] for i in range(4)}, + 1: {i: [] for i in range(4)}, + 2: {0: []}} + elif self.degree == 2: + return {0: {i: [i] for i in range(4)}, + 1: {i: [i+4] for i in range(4)}, + 2: {0: []}} + else: + return {0: {i: [i] for i in range(4)}, + 1: {i: list(range(4 + i * (self.degree-1), + 4 + (i + 1) * (self.degree-1))) + for i in range(4)}, + 2: {0: list(range(4 + 4 * (self.degree - 1), + self.space_dimension()))}} + + def space_dimension(self): + return 4 if self.degree == 1 else (self.degree+1)*(self.degree+2)//2 + 2 + + @property + def index_shape(self): + return (self.space_dimension(),) + + @property + def value_shape(self): + return () + + @cached_property + def _basis(self): + return ds_sym(self.cell.topology, self.degree, sp=symbolics) + + def _basis_deriv(self, xx, alpha): + key = (tuple(xx), alpha) + _, _, phis = self._basis + try: + return self._deriv_cache[key] + except KeyError: + dphi = tuple(diff(phi, xx, alpha) for phi in phis) + return self._deriv_cache.setdefault(key, dphi) + + def basis_evaluation(self, order, ps, entity=None, coordinate_mapping=None): + '''Return code for evaluating the element at known points on the + reference element. + + :param order: return derivatives up to this order. + :param ps: the point set. + :param entity: the cell entity on which to tabulate. + ''' + # Build everything in sympy + vs, xx, _ = self._basis + + # and convert -- all this can be used for each derivative! + phys_verts = coordinate_mapping.physical_vertices() + + phys_points = gem.partial_indexed( + coordinate_mapping.physical_points(ps, entity=entity), + ps.indices) + + repl = dict((vs[idx], phys_verts[idx]) + for idx in numpy.ndindex(vs.shape)) + + repl.update(zip(xx, phys_points)) + + mapper = gem.node.Memoizer(sympy2gem) + mapper.bindings = repl + + result = {} + + for i in range(order+1): + alphas = mis(2, i) + for alpha in alphas: + dphis = self._basis_deriv(xx, alpha) + result[alpha] = gem.ListTensor(list(map(mapper, dphis))) + + return result + + def point_evaluation(self, order, refcoords, entity=None): + raise NotImplementedError("Not done yet, sorry!") + + def mapping(self): + return "physical" + + +def xysub(x, y): + return {x[0]: y[0], x[1]: y[1]} + + +def ds1_sym(ct, *, vs=None, sp=symbolics): + """Constructs lowest-order case of Arbogast's directly defined C^0 serendipity + elements, which are a special case. + :param ct: The cell topology of the reference quadrilateral. + :param vs: (Optional) coordinates of cell on which to construct the basis. + If it is None, this function constructs symbols for the vertices. + :returns: a 3-tuple containing symbols for the physical cell coordinates and the + physical cell independent variables (e.g. "x" and "y") and a list + of the four basis functions. 
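+    :param sp: (Optional) symbolic backend used to build the expressions;
+        defaults to symengine when it is importable, otherwise sympy.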
+ """ + if vs is None: + vs = numpy.asarray(list(zip(sp.symbols('x:4'), + sp.symbols('y:4')))) + else: + vs = numpy.asarray(vs) + + xx = numpy.asarray(sp.symbols("x,y")) + + ts = numpy.zeros((4, 2), dtype=object) + for e in range(4): + v0id, v1id = ct[1][e][:] + for j in range(2): + ts[e, :] = vs[v1id, :] - vs[v0id, :] + + ns = numpy.zeros((4, 2), dtype=object) + for e in (0, 3): + ns[e, 0] = -ts[e, 1] + ns[e, 1] = ts[e, 0] + + for e in (1, 2): + ns[e, 0] = ts[e, 1] + ns[e, 1] = -ts[e, 0] + + xstars = numpy.zeros((4, 2), dtype=object) + for e in range(4): + v0id, v1id = ct[1][e][:] + xstars[e, :] = (vs[v0id, :] + vs[v1id])/2 + + lams = [(xx-xstars[i, :]) @ ns[i, :] for i in range(4)] + + RV = (lams[0] - lams[1]) / (lams[0] + lams[1]) + RH = (lams[2] - lams[3]) / (lams[2] + lams[3]) + Rs = [RV, RH] + + xis = [] + for e in range(4): + dct = xysub(xx, xstars[e, :]) + i = 2*((3-e)//2) + j = i + 1 + xi = lams[i] * lams[j] * (1 + (-1)**(e+1) * Rs[e//2]) / lams[i].subs(dct) / lams[j].subs(dct) / 2 + xis.append(xi) + + d = xysub(xx, vs[0, :]) + r = lams[1] * lams[3] / lams[1].subs(d) / lams[3].subs(d) + d = xysub(xx, vs[2, :]) + r -= lams[0] * lams[3] / lams[0].subs(d) / lams[3].subs(d) + d = xysub(xx, vs[3, :]) + r += lams[0] * lams[2] / lams[0].subs(d) / lams[2].subs(d) + d = xysub(xx, vs[1, :]) + r -= lams[1] * lams[2] / lams[1].subs(d) / lams[2].subs(d) + R = r - sum([r.subs(xysub(xx, xstars[i, :])) * xis[i] for i in range(4)]) + + n03 = numpy.array([[0, -1], [1, 0]]) @ (vs[3, :] - vs[0, :]) + lam03 = (xx - vs[0, :]) @ n03 + n12 = numpy.array([[0, -1], [1, 0]]) @ (vs[2, :] - vs[1, :]) + lam12 = (xx - vs[2, :]) @ n12 + + phi0tilde = lam12 - lam12.subs({xx[0]: vs[3, 0], xx[1]: vs[3, 1]}) * (1 + R) / 2 + phi1tilde = lam03 - lam03.subs({xx[0]: vs[2, 0], xx[1]: vs[2, 1]}) * (1 - R) / 2 + phi2tilde = lam03 - lam03.subs({xx[0]: vs[1, 0], xx[1]: vs[1, 1]}) * (1 - R) / 2 + phi3tilde = lam12 - lam12.subs({xx[0]: vs[0, 0], xx[1]: vs[0, 1]}) * (1 + R) / 2 + + phis = [] + for i, phitilde in enumerate([phi0tilde, phi1tilde, phi2tilde, phi3tilde]): + phi = phitilde / phitilde.subs({xx[0]: vs[i, 0], xx[1]: vs[i, 1]}) + phis.append(phi) + + return vs, xx, numpy.asarray(phis) + + +def newton_dd(nds, fs): + """Constructs Newton's divided differences for the input arrays, + which may include symbolic values.""" + n = len(nds) + mat = numpy.zeros((n, n), dtype=object) + mat[:, 0] = fs[:] + for j in range(1, n): + for i in range(n-j): + mat[i, j] = (mat[i+1, j-1] - mat[i, j-1]) / (nds[i+j] - nds[i]) + return mat[0, :] + + +def newton_poly(nds, fs, xsym): + """Constructs Lagrange interpolating polynomial passing through + x values nds and y values fs. Returns a a symbolic object in terms + of independent variable xsym.""" + coeffs = newton_dd(nds, fs) + result = coeffs[-1] + n = len(coeffs) + for i in range(n-2, -1, -1): + result = result * (xsym - nds[i]) + coeffs[i] + return result + + +def diff(expr, xx, alpha): + """Differentiate expr with respect to xx. + + :arg expr: symengine/symengine Expression to differentiate. + :arg xx: iterable of coordinates to differentiate with respect to. + :arg alpha: derivative multiindex, one entry for each entry of xx + indicating how many derivatives in that direction. 
+ :returns: New symengine/symengine expression.""" + if isinstance(expr, sympy.Expr): + return expr.diff(*(zip(xx, alpha))) + else: + return symengine.diff(expr, *(chain(*(repeat(x, a) for x, a in zip(xx, alpha))))) + + +def dsr_sym(ct, r, *, vs=None, sp=symbolics): + """Constructs higher-order (>= 2) case of Arbogast's directly defined C^0 serendipity + elements, which include all polynomials of degree r plus a couple of rational + functions. + :param ct: The cell topology of the reference quadrilateral. + :param vs: (Optional) coordinates of cell on which to construct the basis. + If it is None, this function constructs symbols for the vertices. + :returns: a 3-tuple containing symbols for the physical cell coordinates and the + physical cell independent variables (e.g. "x" and "y") and a list + of the four basis functions. + """ + if vs is None: # do vertices symbolically + vs = numpy.asarray(list(zip(sp.symbols('x:4'), + sp.symbols('y:4')))) + else: + vs = numpy.asarray(vs) + xx = numpy.asarray(sp.symbols("x,y")) + + ts = numpy.zeros((4, 2), dtype=object) + for e in range(4): + v0id, v1id = ct[1][e][:] + ts[e, :] = vs[v1id, :] - vs[v0id, :] + + ns = numpy.zeros((4, 2), dtype=object) + for e in (0, 3): + ns[e, 0] = -ts[e, 1] + ns[e, 1] = ts[e, 0] + + for e in (1, 2): + ns[e, 0] = ts[e, 1] + ns[e, 1] = -ts[e, 0] + + # midpoints of each edge + xstars = numpy.zeros((4, 2), dtype=object) + for e in range(4): + v0id, v1id = ct[1][e][:] + xstars[e, :] = (vs[v0id, :] + vs[v1id])/2 + + lams = [(xx-xstars[i, :]) @ ns[i, :] for i in range(4)] + + # # internal functions + bubble = numpy.prod(lams) + + if r < 4: + internal_bfs = [] + internal_nodes = [] + elif r == 4: # Just one point + xbar = sum(vs[i, 0] for i in range(4)) / 4 + ybar = sum(vs[i, 1] for i in range(4)) / 4 + internal_bfs = [bubble / bubble.subs(xysub(xx, (xbar, ybar)))] + internal_nodes = [(xbar, ybar)] + else: # build a triangular lattice inside the quad + dx0 = (vs[1, :] - vs[0, :]) / (r-2) + dx1 = (vs[2, :] - vs[0, :]) / (r-2) + + # Vertices of the triangle + v0 = vs[0, :] + dx0 + dx1 + v1 = vs[0, :] + (r-3) * dx0 + dx1 + v2 = vs[0, :] + dx0 + (r-3) * dx1 + + # Pardon the fortran, but these are barycentric coordinates... + bary = numpy.zeros((3,), dtype="object") + y12 = v1[1] - v2[1] + x21 = v2[0] - v1[0] + x02 = v0[0] - v2[0] + y02 = v0[1] - v2[1] + det = y12 * x02 + x21 * y02 + delx = xx[0] - v2[0] + dely = xx[1] - v2[1] + bary[0] = (y12 * delx + x21 * dely) / det + bary[1] = (-y02 * delx + x02 * dely) / det + bary[2] = 1 - bary[0] - bary[1] + + # And this bit directly constructs the Lagrange polynomials + # of degree r-4 on the triangular lattice inside the triangle. + # This trick is restricted to equispaced points, but we're on a much + # lower degree than r. This bypasses symbolic inversion/etc otherwise + # required to build the Lagrange polynomials. 
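+        # For example, r = 5 gives rm4 = 1: the lattice is just the three vertices
+        # v0, v1, v2, and the Lagrange functions reduce to the barycentric
+        # coordinates bary[0], bary[1], bary[2].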
+ rm4 = r - 4 + lags = [] + internal_nodes = [] + for i in range(rm4, -1, -1): + for j in range(rm4-i, -1, -1): + k = rm4 - i - j + internal_nodes.append((v0 * i + v1 * j + v2 * k)/rm4) + ii = (i, j, k) + lag_cur = sp.Integer(1) + for q in range(3): + for p in range(ii[q]): + lag_cur *= (rm4 * bary[q] - p) / (ii[q] - p) + lags.append(lag_cur.simplify()) + + internal_bfs = [] + for lag, nd in zip(lags, internal_nodes): + foo = lag * bubble + internal_bfs.append(foo / foo.subs(xysub(xx, nd))) + + RV = (lams[0] - lams[1]) / (lams[0] + lams[1]) + RH = (lams[2] - lams[3]) / (lams[2] + lams[3]) + + # R for each edge (1 on edge, zero on opposite + Rs = [(1 - RV) / 2, (1 + RV) / 2, (1 - RH) / 2, (1 + RH) / 2] + + nodes1d = [sp.Rational(i, r) for i in range(1, r)] + + s = sp.Symbol('s') + + # for each edge: + # I need its adjacent two edges and its opposite edge + # and its "tunnel R" RH or RV + # This is very 2d specific. + opposite_edges = {e: [eother for eother in ct[1] + if set(ct[1][e]).intersection(ct[1][eother]) == set()][0] + for e in ct[1]} + adjacent_edges = {e: tuple(sorted([eother for eother in ct[1] + if eother != e + and set(ct[1][e]).intersection(ct[1][eother]) + != set()])) + for e in ct[1]} + + ae = adjacent_edges + tunnel_R_edges = {e: ((lams[ae[e][0]] - lams[ae[e][1]]) + / (lams[ae[e][0]] + lams[ae[e][1]])) + for e in range(4)} + edge_nodes = [] + for ed in range(4): + ((v0x, v0y), (v1x, v1y)) = vs[ct[1][ed], :] + delx = v1x - v0x + dely = v1y - v0y + edge_nodes.append([(v0x+nd*delx, v0y+nd*dely) for nd in nodes1d]) + + # subtracts off the value of function at internal nodes times those + # internal basis functions + def nodalize(f): + return f - sum(f.subs(xysub(xx, nd)) * bf + for bf, nd in zip(internal_bfs, internal_nodes)) + + edge_bfs = [] + if r == 2: + for ed in range(4): + lamadj0 = lams[adjacent_edges[ed][0]] + lamadj1 = lams[adjacent_edges[ed][1]] + ephi = lamadj0 * lamadj1 * Rs[ed] + phi = nodalize(ephi) / ephi.subs(xysub(xx, xstars[ed])) + edge_bfs.append([phi]) + else: + for ed in range(4): + ((v0x, v0y), (v1x, v1y)) = vs[ct[1][ed], :] + Rcur = tunnel_R_edges[ed] + lam_op = lams[opposite_edges[ed]] + + edge_bfs_cur = [] + + for i in range(len(nodes1d)): + # strike out i:th node + idcs = [j for j in range(len(nodes1d)) if i != j] + nodes1d_cur = [nodes1d[j] for j in idcs] + edge_nodes_cur = [edge_nodes[ed][j] + for j in idcs] + + # construct the 1d interpolation with remaining nodes + pvals = [] + for nd in edge_nodes_cur: + sub = xysub(xx, nd) + pval_cur = (-1 * Rcur.subs(sub)**(r-2) + / lam_op.subs(sub)) + pvals.append(pval_cur) + + ptilde = newton_poly(nodes1d_cur, pvals, s) + xt = xx @ ts[ed] + vt0 = numpy.asarray((v0x, v0y)) @ ts[ed] + vt1 = numpy.asarray((v1x, v1y)) @ ts[ed] + p = ptilde.subs({s: (xt-vt0) / (vt1-vt0)}) + + prebf = (lams[adjacent_edges[ed][0]] + * lams[adjacent_edges[ed][1]] + * (lams[opposite_edges[ed]] * p + + Rcur**(r-2) * Rs[ed])) + + prebf = nodalize(prebf) + bfcur = prebf / prebf.subs(xysub(xx, edge_nodes[ed][i])) + edge_bfs_cur.append(bfcur) + + edge_bfs.append(edge_bfs_cur) + + # vertex basis functions + vertex_to_adj_edges = {i: tuple([e for e in ct[1] if i in ct[1][e]]) + for i in ct[0]} + vertex_to_off_edges = {i: tuple([e for e in ct[1] if i not in ct[1][e]]) + for i in ct[0]} + + vertex_bfs = [] + for v in range(4): + ed0, ed1 = vertex_to_off_edges[v] + lam0 = lams[ed0] + lam1 = lams[ed1] + + prebf = lam0 * lam1 + + # subtract off edge values + for adj_ed in vertex_to_adj_edges[v]: + edge_nodes_cur = edge_nodes[adj_ed] + edge_bfs_cur 
= edge_bfs[adj_ed] + for k, (nd, edbf) in enumerate(zip(edge_nodes_cur, edge_bfs_cur)): + sb = xysub(xx, nd) + prebf -= lam0.subs(sb) * lam1.subs(sb) * edbf + + bf = nodalize(prebf) / prebf.subs(xysub(xx, vs[v, :])) + vertex_bfs.append(bf) + + bfs = vertex_bfs + for edbfs in edge_bfs: + bfs.extend(edbfs) + bfs.extend(internal_bfs) + + nds = [tuple(vs[i, :]) for i in range(4)] + for ends in edge_nodes: + nds.extend(ends) + nds.extend(internal_nodes) + + return vs, xx, numpy.asarray(bfs) + + +def ds_sym(ct, r, *, vs=None, sp=symbolics): + """Symbolically Constructs Arbogast's directly defined C^0 serendipity elements, + which include all polynomials of degree r plus a couple of rational functions. + :param ct: The cell topology of the reference quadrilateral. + :param vs: (Optional) coordinates of cell on which to construct the basis. + If it is None, this function constructs symbols for the vertices. + :returns: a 3-tuple containing symbols for the physical cell coordinates and the + physical cell independent variables (e.g. "x" and "y") and a list + of the four basis functions. + """ + if r == 1: + return ds1_sym(ct, vs=vs, sp=sp) + else: + return dsr_sym(ct, r, vs=vs, sp=sp) diff --git a/finat/discontinuous.py b/finat/discontinuous.py new file mode 100644 index 000000000..f5d15ae01 --- /dev/null +++ b/finat/discontinuous.py @@ -0,0 +1,80 @@ +import FIAT + +from gem.utils import cached_property + +from finat.finiteelementbase import FiniteElementBase + + +class DiscontinuousElement(FiniteElementBase): + """Element wrapper that makes a FInAT element discontinuous.""" + + def __init__(self, element): + super().__init__() + self.element = element + + @property + def cell(self): + return self.element.cell + + @property + def complex(self): + return self.element.complex + + @property + def degree(self): + return self.element.degree + + @cached_property + def formdegree(self): + # Always discontinuous! + return self.element.cell.get_spatial_dimension() + + @cached_property + def _entity_dofs(self): + result = {dim: {i: [] for i in entities} + for dim, entities in self.cell.get_topology().items()} + cell_dimension = self.cell.get_dimension() + result[cell_dimension][0].extend(range(self.space_dimension())) + return result + + def entity_dofs(self): + return self._entity_dofs + + @cached_property + def entity_permutations(self): + # Return entity_permutations of the base finite element if it only + # has cell degrees of freedom; otherwise entity_permutations is not + # yet implemented for DiscontinuousElement. 
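+        # The two dicts agree exactly when no DOFs are attached to sub-entities,
+        # i.e. when every DOF is interior to the cell.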
+ if self.element.entity_dofs() == self.element.entity_closure_dofs(): + return self.element.entity_permutations + else: + raise NotImplementedError(f"entity_permutations not yet implemented for a general {type(self)}") + + def space_dimension(self): + return self.element.space_dimension() + + @property + def index_shape(self): + return self.element.index_shape + + @property + def value_shape(self): + return self.element.value_shape + + @cached_property + def fiat_equivalent(self): + return FIAT.DiscontinuousElement(self.element.fiat_equivalent) + + def basis_evaluation(self, order, ps, entity=None, coordinate_mapping=None): + return self.element.basis_evaluation(order, ps, entity, coordinate_mapping=coordinate_mapping) + + def point_evaluation(self, order, refcoords, entity=None): + return self.element.point_evaluation(order, refcoords, entity) + + @property + def dual_basis(self): + return self.element.dual_basis + + @property + def mapping(self): + return self.element.mapping diff --git a/finat/enriched.py b/finat/enriched.py new file mode 100644 index 000000000..29e42feab --- /dev/null +++ b/finat/enriched.py @@ -0,0 +1,203 @@ +from functools import partial +from itertools import chain +from operator import add, methodcaller + +import FIAT +import gem +import numpy +from gem.utils import cached_property + +from finat.finiteelementbase import FiniteElementBase + + +class EnrichedElement(FiniteElementBase): + """A finite element whose basis functions are the union of the + basis functions of several other finite elements.""" + + def __new__(cls, elements): + elements = tuple(chain.from_iterable(e.elements if isinstance(e, EnrichedElement) else (e,) for e in elements)) + if len(elements) == 1: + return elements[0] + else: + self = super().__new__(cls) + self.elements = elements + return self + + @cached_property + def cell(self): + result, = set(elem.cell for elem in self.elements) + return result + + @cached_property + def complex(self): + return FIAT.reference_element.max_complex(set(elem.complex for elem in self.elements)) + + @cached_property + def degree(self): + return tree_map(max, *[elem.degree for elem in self.elements]) + + @cached_property + def formdegree(self): + ks = set(elem.formdegree for elem in self.elements) + if None in ks: + return None + else: + return max(ks) + + def entity_dofs(self): + '''Return the map of topological entities to degrees of + freedom for the finite element.''' + return concatenate_entity_dofs(self.cell, self.elements, + methodcaller("entity_dofs")) + + @cached_property + def entity_permutations(self): + '''Return the map of topological entities to the map of + orientations to permutation lists for the finite element''' + return concatenate_entity_permutations(self.elements) + + @cached_property + def _entity_support_dofs(self): + return concatenate_entity_dofs(self.cell, self.elements, + methodcaller("entity_support_dofs")) + + def space_dimension(self): + '''Return the dimension of the finite element space.''' + return sum(elem.space_dimension() for elem in self.elements) + + @cached_property + def index_shape(self): + return (self.space_dimension(),) + + @cached_property + def value_shape(self): + '''A tuple indicating the shape of the element.''' + shape, = set(elem.value_shape for elem in self.elements) + return shape + + @cached_property + def fiat_equivalent(self): + if self.is_mixed: + # EnrichedElement is actually a MixedElement + return FIAT.MixedElement([e.element.fiat_equivalent + for e in self.elements], ref_el=self.cell) + else: + 
return FIAT.EnrichedElement(*(e.fiat_equivalent + for e in self.elements)) + + @cached_property + def is_mixed(self): + # Avoid circular import dependency + from finat.mixed import MixedSubElement + + return all(isinstance(e, MixedSubElement) for e in self.elements) + + def _compose_evaluations(self, results): + keys, = set(map(frozenset, results)) + + def merge(tables): + tables = tuple(tables) + zeta = self.get_value_indices() + tensors = [] + for elem, table in zip(self.elements, tables): + beta_i = elem.get_indices() + tensors.append(gem.ComponentTensor( + gem.Indexed(table, beta_i + zeta), + beta_i + )) + beta = self.get_indices() + return gem.ComponentTensor( + gem.Indexed(gem.Concatenate(*tensors), beta), + beta + zeta + ) + return {key: merge(result[key] for result in results) + for key in keys} + + def basis_evaluation(self, order, ps, entity=None, coordinate_mapping=None): + '''Return code for evaluating the element at known points on the + reference element. + + :param order: return derivatives up to this order. + :param ps: the point set object. + :param entity: the cell entity on which to tabulate. + ''' + results = [element.basis_evaluation(order, ps, entity, coordinate_mapping=coordinate_mapping) + for element in self.elements] + return self._compose_evaluations(results) + + def point_evaluation(self, order, refcoords, entity=None): + '''Return code for evaluating the element at an arbitrary points on + the reference element. + + :param order: return derivatives up to this order. + :param refcoords: GEM expression representing the coordinates + on the reference entity. Its shape must be + a vector with the correct dimension, its + free indices are arbitrary. + :param entity: the cell entity on which to tabulate. + ''' + results = [element.point_evaluation(order, refcoords, entity) + for element in self.elements] + return self._compose_evaluations(results) + + @property + def mapping(self): + mappings = set(elem.mapping for elem in self.elements) + if len(mappings) != 1: + return None + else: + result, = mappings + return result + + +def tree_map(f, *args): + """Like the built-in :py:func:`map`, but applies to a tuple tree.""" + nonleaf, = set(isinstance(arg, tuple) for arg in args) + if nonleaf: + ndim, = set(map(len, args)) # asserts equal arity of all args + return tuple(tree_map(f, *subargs) for subargs in zip(*args)) + else: + return f(*args) + + +def concatenate_entity_dofs(ref_el, elements, method): + """Combine the entity DoFs from a list of elements into a combined + dict containing the information for the concatenated DoFs of all + the elements. + + :arg ref_el: the reference cell + :arg elements: subelement whose DoFs are concatenated + :arg method: method to obtain the entity DoFs dict + :returns: concatenated entity DoFs dict + """ + entity_dofs = {dim: {i: [] for i in entities} + for dim, entities in ref_el.get_topology().items()} + offsets = numpy.cumsum([0] + list(e.space_dimension() + for e in elements), dtype=int) + for i, d in enumerate(map(method, elements)): + for dim, dofs in d.items(): + for ent, off in dofs.items(): + entity_dofs[dim][ent] += list(map(partial(add, offsets[i]), off)) + return entity_dofs + + +def concatenate_entity_permutations(elements): + """For each dimension, for each entity, and for each possible + entity orientation, collect the DoF permutation lists from + entity_permutations dicts of elements and concatenate them. 
+ + :arg elements: subelements whose DoF permutation lists are concatenated + :returns: entity_permutation dict of the :class:`EnrichedElement` object + composed of elements. + """ + permutations = {} + for element in elements: + for dim, e_o_p_map in element.entity_permutations.items(): + dim_permutations = permutations.setdefault(dim, {}) + for e, o_p_map in e_o_p_map.items(): + e_dim_permutations = dim_permutations.setdefault(e, {}) + for o, p in o_p_map.items(): + o_e_dim_permutations = e_dim_permutations.setdefault(o, []) + offset = len(o_e_dim_permutations) + o_e_dim_permutations += list(offset + q for q in p) + return permutations diff --git a/finat/fiat_elements.py b/finat/fiat_elements.py new file mode 100644 index 000000000..1f2081894 --- /dev/null +++ b/finat/fiat_elements.py @@ -0,0 +1,482 @@ +import FIAT +import gem +import numpy as np +import sympy as sp +from gem.utils import cached_property + +from finat.finiteelementbase import FiniteElementBase +from finat.point_set import PointSet +from finat.sympy2gem import sympy2gem + +try: + from firedrake_citations import Citations + Citations().add("Geevers2018new", """ +@article{Geevers2018new, + title={New higher-order mass-lumped tetrahedral elements for wave propagation modelling}, + author={Geevers, Sjoerd and Mulder, Wim A and van der Vegt, Jaap JW}, + journal={SIAM journal on scientific computing}, + volume={40}, + number={5}, + pages={A2830--A2857}, + year={2018}, + publisher={SIAM}, + doi={https://doi.org/10.1137/18M1175549}, +} +""") + Citations().add("Chin1999higher", """ +@article{chin1999higher, + title={Higher-order triangular and tetrahedral finite elements with mass lumping for solving the wave equation}, + author={Chin-Joe-Kong, MJS and Mulder, Wim A and Van Veldhuizen, M}, + journal={Journal of Engineering Mathematics}, + volume={35}, + number={4}, + pages={405--426}, + year={1999}, + publisher={Springer}, + doi={https://doi.org/10.1023/A:1004420829610}, +} +""") +except ImportError: + Citations = None + + +class FiatElement(FiniteElementBase): + """Base class for finite elements for which the tabulation is provided + by FIAT.""" + def __init__(self, fiat_element): + super().__init__() + self._element = fiat_element + + @property + def cell(self): + return self._element.get_reference_element() + + @property + def complex(self): + return self._element.get_reference_complex() + + @property + def degree(self): + # Requires FIAT.CiarletElement + return self._element.degree() + + @property + def formdegree(self): + return self._element.get_formdegree() + + def entity_dofs(self): + return self._element.entity_dofs() + + def entity_closure_dofs(self): + return self._element.entity_closure_dofs() + + @property + def entity_permutations(self): + return self._element.entity_permutations() + + def space_dimension(self): + return self._element.space_dimension() + + @property + def index_shape(self): + return (self._element.space_dimension(),) + + @property + def value_shape(self): + return self._element.value_shape() + + @property + def fiat_equivalent(self): + # Just return the underlying FIAT element + return self._element + + def basis_evaluation(self, order, ps, entity=None, coordinate_mapping=None): + '''Return code for evaluating the element at known points on the + reference element. + + :param order: return derivatives up to this order. + :param ps: the point set. + :param entity: the cell entity on which to tabulate. 
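+        :param coordinate_mapping: unused by this reference-element tabulation;
+            accepted for interface compatibility with physically mapped elements.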
+ ''' + space_dimension = self._element.space_dimension() + value_size = np.prod(self._element.value_shape(), dtype=int) + fiat_result = self._element.tabulate(order, ps.points, entity) + result = {} + # In almost all cases, we have + # self.space_dimension() == self._element.space_dimension() + # But for Bell, FIAT reports 21 basis functions, + # but FInAT only 18 (because there are actually 18 + # basis functions, and the additional 3 are for + # dealing with transformations between physical + # and reference space). + index_shape = (self._element.space_dimension(),) + for alpha, fiat_table in fiat_result.items(): + if isinstance(fiat_table, Exception): + result[alpha] = gem.Failure(self.index_shape + self.value_shape, fiat_table) + continue + + derivative = sum(alpha) + table_roll = fiat_table.reshape( + space_dimension, value_size, len(ps.points) + ).transpose(1, 2, 0) + + exprs = [] + for table in table_roll: + if derivative == self.degree and not self.complex.is_macrocell(): + # Make sure numerics satisfies theory + exprs.append(gem.Literal(table[0])) + elif derivative > self.degree: + # Make sure numerics satisfies theory + assert np.allclose(table, 0.0) + exprs.append(gem.Literal(np.zeros(self.index_shape))) + else: + point_indices = ps.indices + point_shape = tuple(index.extent for index in point_indices) + + exprs.append(gem.partial_indexed( + gem.Literal(table.reshape(point_shape + index_shape)), + point_indices + )) + if self.value_shape: + # As above, this extent may be different from that + # advertised by the finat element. + beta = tuple(gem.Index(extent=i) for i in index_shape) + assert len(beta) == len(self.get_indices()) + + zeta = self.get_value_indices() + result[alpha] = gem.ComponentTensor( + gem.Indexed( + gem.ListTensor(np.array( + [gem.Indexed(expr, beta) for expr in exprs] + ).reshape(self.value_shape)), + zeta), + beta + zeta + ) + else: + expr, = exprs + result[alpha] = expr + return result + + def point_evaluation(self, order, refcoords, entity=None): + '''Return code for evaluating the element at an arbitrary points on + the reference element. + + :param order: return derivatives up to this order. + :param refcoords: GEM expression representing the coordinates + on the reference entity. Its shape must be + a vector with the correct dimension, its + free indices are arbitrary. + :param entity: the cell entity on which to tabulate. + ''' + if entity is None: + entity = (self.cell.get_dimension(), 0) + entity_dim, entity_i = entity + + # Spatial dimension of the entity + esd = self.cell.construct_subelement(entity_dim).get_spatial_dimension() + assert isinstance(refcoords, gem.Node) and refcoords.shape == (esd,) + + return point_evaluation(self._element, order, refcoords, (entity_dim, entity_i)) + + @cached_property + def _dual_basis(self): + # Return the numerical part of the dual basis, this split is + # needed because the dual_basis itself can't produce the same + # point set over and over in case it is used multiple times + # (in for example a tensorproductelement). + fiat_dual_basis = self._element.dual_basis() + seen = dict() + allpts = [] + # Find the unique points to evaluate at. + # We might be able to make this a smaller set by treating each + # point one by one, but most of the redundancy comes from + # multiple functionals using the same quadrature rule. 
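+        # `seen` maps a sorted tuple of points to the (start, end) slice that
+        # those points occupy in `allpts`.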
+ for dual in fiat_dual_basis: + if len(dual.deriv_dict) != 0: + raise NotImplementedError("FIAT dual bases with derivative nodes represented via a ``Functional.deriv_dict`` property do not currently have a FInAT dual basis") + pts = dual.get_point_dict().keys() + pts = tuple(sorted(pts)) # need this for determinism + if pts not in seen: + # k are indices into Q (see below) for the seen points + kstart = len(allpts) + kend = kstart + len(pts) + seen[pts] = kstart, kend + allpts.extend(pts) + # Build Q. + # Q is a tensor of weights (of total rank R) to contract with a unique + # vector of points to evaluate at, giving a tensor (of total rank R-1) + # where the first indices (rows) correspond to a basis functional + # (node). + # Q is a DOK Sparse matrix in (row, col, higher,..)=>value pairs (to + # become a gem.SparseLiteral when implemented). + # Rows (i) are number of nodes/dual functionals. + # Columns (k) are unique points to evaluate. + # Higher indices (*cmp) are tensor indices of the weights when weights + # are tensor valued. + Q = {} + for i, dual in enumerate(fiat_dual_basis): + point_dict = dual.get_point_dict() + pts = tuple(sorted(point_dict.keys())) + kstart, kend = seen[pts] + for p, k in zip(pts, range(kstart, kend)): + for weight, cmp in point_dict[p]: + Q[(i, k, *cmp)] = weight + if all(len(set(key)) == 1 and np.isclose(weight, 1) and len(key) == 2 + for key, weight in Q.items()): + # Identity matrix Q can be expressed symbolically + extents = tuple(map(max, zip(*Q.keys()))) + js = tuple(gem.Index(extent=e+1) for e in extents) + assert len(js) == 2 + Q = gem.ComponentTensor(gem.Delta(*js), js) + else: + # temporary until sparse literals are implemented in GEM which will + # automatically convert a dictionary of keys internally. + # TODO the below is unnecessarily slow and would be sped up + # significantly by building Q in a COO format rather than DOK (i.e. + # storing coords and associated data in (nonzeros, entries) shaped + # numpy arrays) to take advantage of numpy multiindexing + if len(Q) == 1: + Qshape = tuple(s + 1 for s in tuple(Q)[0]) + else: + Qshape = tuple(s + 1 for s in map(max, *Q)) + Qdense = np.zeros(Qshape, dtype=np.float64) + for idx, value in Q.items(): + Qdense[idx] = value + Q = gem.Literal(Qdense) + return Q, np.asarray(allpts) + + @property + def dual_basis(self): + # Return Q with x.indices already a free index for the + # consumer to use + # expensive numerical extraction is done once per element + # instance, but the point set must be created every time we + # build the dual. 
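+        # Rough shape bookkeeping (a sketch; exact shapes depend on the
+        # element): _dual_basis returns Q of shape
+        # (num_nodes, num_points, *dual_weight_shape).  Below, the point
+        # axis is indexed by the PointSet's free index, so the returned Q
+        # keeps only the node axis plus any dual-weight shape axes.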
+ Q, pts = self._dual_basis + x = PointSet(pts) + assert len(x.indices) == 1 + assert Q.shape[1] == x.indices[0].extent + i, *js = gem.indices(len(Q.shape) - 1) + Q = gem.ComponentTensor(gem.Indexed(Q, (i, *x.indices, *js)), (i, *js)) + return Q, x + + @property + def mapping(self): + mappings = set(self._element.mapping()) + if len(mappings) != 1: + return None + else: + result, = mappings + return result + + +def point_evaluation(fiat_element, order, refcoords, entity): + # Coordinates on the reference entity (SymPy) + esd, = refcoords.shape + Xi = sp.symbols('X Y Z')[:esd] + + space_dimension = fiat_element.space_dimension() + value_size = np.prod(fiat_element.value_shape(), dtype=int) + fiat_result = fiat_element.tabulate(order, [Xi], entity) + result = {} + for alpha, fiat_table in fiat_result.items(): + if isinstance(fiat_table, Exception): + result[alpha] = gem.Failure((space_dimension,) + fiat_element.value_shape(), fiat_table) + continue + + # Convert SymPy expression to GEM + mapper = gem.node.Memoizer(sympy2gem) + mapper.bindings = {s: gem.Indexed(refcoords, (i,)) + for i, s in enumerate(Xi)} + gem_table = np.vectorize(mapper)(fiat_table) + + table_roll = gem_table.reshape(space_dimension, value_size).transpose() + + exprs = [] + for table in table_roll: + exprs.append(gem.ListTensor(table.reshape(space_dimension))) + if fiat_element.value_shape(): + beta = (gem.Index(extent=space_dimension),) + zeta = tuple(gem.Index(extent=d) + for d in fiat_element.value_shape()) + result[alpha] = gem.ComponentTensor( + gem.Indexed( + gem.ListTensor(np.array( + [gem.Indexed(expr, beta) for expr in exprs] + ).reshape(fiat_element.value_shape())), + zeta), + beta + zeta + ) + else: + expr, = exprs + result[alpha] = expr + return result + + +class Regge(FiatElement): # symmetric matrix valued + def __init__(self, cell, degree, variant=None): + super().__init__(FIAT.Regge(cell, degree, variant=variant)) + + +class HellanHerrmannJohnson(FiatElement): # symmetric matrix valued + def __init__(self, cell, degree, variant=None): + super().__init__(FIAT.HellanHerrmannJohnson(cell, degree, variant=variant)) + + +class GopalakrishnanLedererSchoberlFirstKind(FiatElement): # traceless matrix valued + def __init__(self, cell, degree): + super().__init__(FIAT.GopalakrishnanLedererSchoberlFirstKind(cell, degree)) + + +class GopalakrishnanLedererSchoberlSecondKind(FiatElement): # traceless matrix valued + def __init__(self, cell, degree): + super().__init__(FIAT.GopalakrishnanLedererSchoberlSecondKind(cell, degree)) + + +class ScalarFiatElement(FiatElement): + @property + def value_shape(self): + return () + + +class Bernstein(ScalarFiatElement): + # TODO: Replace this with a smarter implementation + def __init__(self, cell, degree): + super().__init__(FIAT.Bernstein(cell, degree)) + + +class Bubble(ScalarFiatElement): + def __init__(self, cell, degree): + super().__init__(FIAT.Bubble(cell, degree)) + + +class FacetBubble(ScalarFiatElement): + def __init__(self, cell, degree): + super().__init__(FIAT.FacetBubble(cell, degree)) + + +class CrouzeixRaviart(ScalarFiatElement): + def __init__(self, cell, degree): + super().__init__(FIAT.CrouzeixRaviart(cell, degree)) + + +class Lagrange(ScalarFiatElement): + def __init__(self, cell, degree, variant=None): + super().__init__(FIAT.Lagrange(cell, degree, variant=variant)) + + +class KongMulderVeldhuizen(ScalarFiatElement): + def __init__(self, cell, degree): + super().__init__(FIAT.KongMulderVeldhuizen(cell, degree)) + if Citations is not None: + 
Citations().register("Chin1999higher") + Citations().register("Geevers2018new") + + +class DiscontinuousLagrange(ScalarFiatElement): + def __init__(self, cell, degree, variant=None): + super().__init__(FIAT.DiscontinuousLagrange(cell, degree, variant=variant)) + + +class Real(DiscontinuousLagrange): + ... + + +class Serendipity(ScalarFiatElement): + def __init__(self, cell, degree): + super().__init__(FIAT.Serendipity(cell, degree)) + + +class DPC(ScalarFiatElement): + def __init__(self, cell, degree): + super().__init__(FIAT.DPC(cell, degree)) + + +class DiscontinuousTaylor(ScalarFiatElement): + def __init__(self, cell, degree): + super().__init__(FIAT.DiscontinuousTaylor(cell, degree)) + + +class VectorFiatElement(FiatElement): + @property + def value_shape(self): + return (self.cell.get_spatial_dimension(),) + + +class RaviartThomas(VectorFiatElement): + def __init__(self, cell, degree, variant=None): + super().__init__(FIAT.RaviartThomas(cell, degree, variant=variant)) + + +class TrimmedSerendipityFace(VectorFiatElement): + def __init__(self, cell, degree): + super().__init__(FIAT.TrimmedSerendipityFace(cell, degree)) + + @property + def entity_permutations(self): + raise NotImplementedError(f"entity_permutations not yet implemented for {type(self)}") + + +class TrimmedSerendipityDiv(VectorFiatElement): + def __init__(self, cell, degree): + super().__init__(FIAT.TrimmedSerendipityDiv(cell, degree)) + + @property + def entity_permutations(self): + raise NotImplementedError(f"entity_permutations not yet implemented for {type(self)}") + + +class TrimmedSerendipityEdge(VectorFiatElement): + def __init__(self, cell, degree): + super().__init__(FIAT.TrimmedSerendipityEdge(cell, degree)) + + @property + def entity_permutations(self): + raise NotImplementedError(f"entity_permutations not yet implemented for {type(self)}") + + +class TrimmedSerendipityCurl(VectorFiatElement): + def __init__(self, cell, degree): + super().__init__(FIAT.TrimmedSerendipityCurl(cell, degree)) + + @property + def entity_permutations(self): + raise NotImplementedError(f"entity_permutations not yet implemented for {type(self)}") + + +class BrezziDouglasMarini(VectorFiatElement): + def __init__(self, cell, degree, variant=None): + super().__init__(FIAT.BrezziDouglasMarini(cell, degree, variant=variant)) + + +class BrezziDouglasMariniCubeEdge(VectorFiatElement): + def __init__(self, cell, degree): + super().__init__(FIAT.BrezziDouglasMariniCubeEdge(cell, degree)) + + @property + def entity_permutations(self): + raise NotImplementedError(f"entity_permutations not yet implemented for {type(self)}") + + +class BrezziDouglasMariniCubeFace(VectorFiatElement): + def __init__(self, cell, degree): + super().__init__(FIAT.BrezziDouglasMariniCubeFace(cell, degree)) + + @property + def entity_permutations(self): + raise NotImplementedError(f"entity_permutations not yet implemented for {type(self)}") + + +class BrezziDouglasFortinMarini(VectorFiatElement): + def __init__(self, cell, degree): + super().__init__(FIAT.BrezziDouglasFortinMarini(cell, degree)) + + +class Nedelec(VectorFiatElement): + def __init__(self, cell, degree, variant=None): + super().__init__(FIAT.Nedelec(cell, degree, variant=variant)) + + +class NedelecSecondKind(VectorFiatElement): + def __init__(self, cell, degree, variant=None): + super().__init__(FIAT.NedelecSecondKind(cell, degree, variant=variant)) diff --git a/finat/finiteelementbase.py b/finat/finiteelementbase.py new file mode 100644 index 000000000..64a6399d2 --- /dev/null +++ 
b/finat/finiteelementbase.py @@ -0,0 +1,290 @@ +from abc import ABCMeta, abstractmethod, abstractproperty +from itertools import chain + +import gem +import numpy +from gem.interpreter import evaluate +from gem.optimise import delta_elimination, sum_factorise, traverse_product +from gem.utils import cached_property + +from finat.quadrature import make_quadrature + + +class FiniteElementBase(metaclass=ABCMeta): + + @abstractproperty + def cell(self): + '''The reference cell on which the element is defined.''' + + @property + def complex(self): + '''The reference cell complex over which bases are defined. + Can be different than self.cell in the case of macro elements.''' + + @abstractproperty + def degree(self): + '''The degree of the embedding polynomial space. + + In the tensor case this is a tuple. + ''' + + @abstractproperty + def formdegree(self): + '''Degree of the associated form (FEEC)''' + + @abstractmethod + def entity_dofs(self): + '''Return the map of topological entities to degrees of + freedom for the finite element.''' + + @property + def entity_permutations(self): + '''Returns a nested dictionary that gives, for each dimension, + for each entity, and for each possible entity orientation, the + DoF permutation array that maps the entity local DoF ordering + to the canonical global DoF ordering. + + The entity permutations `dict` for the degree 4 Lagrange finite + element on the interval, for instance, is given by: + + .. code-block:: python3 + + {0: {0: {0: [0]}, + 1: {0: [0]}}, + 1: {0: {0: [0, 1, 2], + 1: [2, 1, 0]}}} + + Note that there are two entities on dimension ``0`` (vertices), + each of which has only one possible orientation, while there is + a single entity on dimension ``1`` (interval), which has two + possible orientations representing non-reflected and reflected + intervals. + ''' + raise NotImplementedError(f"entity_permutations not yet implemented for {type(self)}") + + @cached_property + def _entity_closure_dofs(self): + # Compute the nodes on the closure of each sub_entity. + entity_dofs = self.entity_dofs() + return {dim: {e: sorted(chain(*[entity_dofs[d][se] + for d, se in sub_entities])) + for e, sub_entities in entities.items()} + for dim, entities in self.cell.sub_entities.items()} + + def entity_closure_dofs(self): + '''Return the map of topological entities to degrees of + freedom on the closure of those entities for the finite + element.''' + return self._entity_closure_dofs + + @cached_property + def _entity_support_dofs(self): + esd = {} + for entity_dim in self.cell.sub_entities.keys(): + beta = self.get_indices() + zeta = self.get_value_indices() + + entity_cell = self.cell.construct_subelement(entity_dim) + quad = make_quadrature(entity_cell, (2*numpy.array(self.degree)).tolist()) + + eps = 1.e-8 # Is this a safe value? + + result = {} + for f in self.entity_dofs()[entity_dim].keys(): + # Tabulate basis functions on the facet + vals, = self.basis_evaluation(0, quad.point_set, entity=(entity_dim, f)).values() + # Integrate the square of the basis functions on the facet. 
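+                # Schematically: ints[i] = sum_q w_q * sum_c vals[i, c](x_q)**2
+                # over the facet quadrature rule; dofs whose integral exceeds
+                # eps are taken to be supported on this facet.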
+ ints = gem.IndexSum( + gem.Product(gem.IndexSum(gem.Product(gem.Indexed(vals, beta + zeta), + gem.Indexed(vals, beta + zeta)), zeta), + quad.weight_expression), + quad.point_set.indices + ) + evaluation, = evaluate([gem.ComponentTensor(ints, beta)]) + ints = evaluation.arr.flatten() + assert evaluation.fids == () + result[f] = [dof for dof, i in enumerate(ints) if i > eps] + + esd[entity_dim] = result + return esd + + def entity_support_dofs(self): + '''Return the map of topological entities to degrees of + freedom that have non-zero support on those entities for the + finite element.''' + return self._entity_support_dofs + + @abstractmethod + def space_dimension(self): + '''Return the dimension of the finite element space.''' + + @abstractproperty + def index_shape(self): + '''A tuple indicating the number of degrees of freedom in the + element. For example a scalar quadratic Lagrange element on a triangle + would return (6,) while a vector valued version of the same element + would return (6, 2)''' + + @abstractproperty + def value_shape(self): + '''A tuple indicating the shape of the element.''' + + @property + def fiat_equivalent(self): + '''The FIAT element equivalent to this FInAT element.''' + raise NotImplementedError( + f"Cannot make equivalent FIAT element for {type(self).__name__}" + ) + + def get_indices(self): + '''A tuple of GEM :class:`Index` of the correct extents to loop over + the basis functions of this element.''' + + return tuple(gem.Index(extent=d) for d in self.index_shape) + + def get_value_indices(self): + '''A tuple of GEM :class:`~gem.Index` of the correct extents to loop over + the value shape of this element.''' + + return tuple(gem.Index(extent=d) for d in self.value_shape) + + @abstractmethod + def basis_evaluation(self, order, ps, entity=None, coordinate_mapping=None): + '''Return code for evaluating the element at known points on the + reference element. + + :param order: return derivatives up to this order. + :param ps: the point set object. + :param entity: the cell entity on which to tabulate. + :param coordinate_mapping: a + :class:`~.physically_mapped.PhysicalGeometry` object that + provides physical geometry callbacks (may be None). + ''' + + @abstractmethod + def point_evaluation(self, order, refcoords, entity=None): + '''Return code for evaluating the element at an arbitrary points on + the reference element. + + :param order: return derivatives up to this order. + :param refcoords: GEM expression representing the coordinates + on the reference entity. Its shape must be + a vector with the correct dimension, its + free indices are arbitrary. + :param entity: the cell entity on which to tabulate. + ''' + + @property + def dual_basis(self): + '''Return a dual evaluation gem weight tensor Q and point set x to dual + evaluate a function fn at. + + The general dual evaluation is then Q * fn(x) (the contraction of Q + with fn(x) along the the indices of x and any shape introduced by fn). + + If the dual weights are scalar then Q, for a general scalar FIAT + element, is a matrix with dimensions + + .. code-block:: text + + (num_nodes, num_points) + + If the dual weights are tensor valued then Q, for a general tensor + valued FIAT element, is a tensor with dimensions + + .. code-block:: text + + (num_nodes, num_points, dual_weight_shape[0], ..., dual_weight_shape[n]) + + If the dual basis is of a tensor product or FlattenedDimensions element + with N factors then Q in general is a tensor with dimensions + + .. 
code-block:: text + + (num_nodes_factor1, ..., num_nodes_factorN, + num_points_factor1, ..., num_points_factorN, + dual_weight_shape[0], ..., dual_weight_shape[n]) + + where num_points_factorX are made free indices that match the free + indices of x (which is now a TensorPointSet). + + If the dual basis is of a tensor finite element with some shape + (S1, S2, ..., Sn) then the tensor element tQ is constructed from the + base element's Q by taking the outer product with appropriately sized + identity matrices: + + .. code-block:: text + + tQ = Q ⊗ 𝟙ₛ₁ ⊗ 𝟙ₛ₂ ⊗ ... ⊗ 𝟙ₛₙ + + .. note:: + + When Q is returned, the contraction indices of the point set are + already free indices rather than being left in its shape (as either + ``num_points`` or ``num_points_factorX``). This is to avoid index + labelling confusion when performing the dual evaluation + contraction. + + .. note:: + + FIAT element dual bases are built from their ``Functional.pt_dict`` + properties. Therefore any FIAT dual bases with derivative nodes + represented via a ``Functional.deriv_dict`` property does not + currently have a FInAT dual basis. + ''' + raise NotImplementedError( + f"Dual basis not defined for element {type(self).__name__}" + ) + + def dual_evaluation(self, fn): + '''Get a GEM expression for performing the dual basis evaluation at + the nodes of the reference element. Currently only works for flat + elements: tensor elements are implemented in + :class:`TensorFiniteElement`. + + :param fn: Callable representing the function to dual evaluate. + Callable should take in an :class:`AbstractPointSet` and + return a GEM expression for evaluation of the function at + those points. + :returns: A tuple ``(dual_evaluation_gem_expression, basis_indices)`` + where the given ``basis_indices`` are those needed to form a + return expression for the code which is compiled from + ``dual_evaluation_gem_expression`` (alongside any argument + multiindices already encoded within ``fn``) + ''' + Q, x = self.dual_basis + + expr = fn(x) + # Apply targeted sum factorisation and delta elimination to + # the expression + sum_indices, factors = delta_elimination(*traverse_product(expr)) + expr = sum_factorise(sum_indices, factors) + # NOTE: any shape indices in the expression are because the + # expression is tensor valued. + assert expr.shape == Q.shape[len(Q.shape)-len(expr.shape):] + shape_indices = gem.indices(len(expr.shape)) + basis_indices = gem.indices(len(Q.shape) - len(expr.shape)) + Qi = Q[basis_indices + shape_indices] + expri = expr[shape_indices] + evaluation = gem.IndexSum(Qi * expri, x.indices + shape_indices) + # Now we want to factorise over the new contraction with x, + # ignoring any shape indices to avoid hitting the sum- + # factorisation index limit (this is a bit of a hack). + # Really need to do a more targeted job here. + evaluation = gem.optimise.contraction(evaluation, shape_indices) + return evaluation, basis_indices + + @abstractproperty + def mapping(self): + '''Appropriate mapping from the reference cell to a physical cell for + all basis functions of the finite element.''' + + +def entity_support_dofs(elem, entity_dim): + '''Return the map of entity id to the degrees of freedom for which + the corresponding basis functions take non-zero values. + + :arg elem: FInAT finite element + :arg entity_dim: Dimension of the cell subentity. 
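+
+    For instance (illustrative; numbering follows FIAT conventions), for
+    quadratic Lagrange on a triangle the basis functions with non-zero
+    trace on edge 0 are the vertex functions at vertices 1 and 2 plus the
+    midpoint function of edge 0:
+
+    .. code-block:: python3
+
+        entity_support_dofs(Lagrange(FIAT.ufc_simplex(2), 2), 1)
+        # {0: [1, 2, 3], 1: [0, 2, 4], 2: [0, 1, 5]}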
+ ''' + return elem.entity_support_dofs()[entity_dim] diff --git a/finat/guzman_neilan.py b/finat/guzman_neilan.py new file mode 100644 index 000000000..09c24c3da --- /dev/null +++ b/finat/guzman_neilan.py @@ -0,0 +1,34 @@ +import FIAT + +from finat.physically_mapped import Citations +from finat.piola_mapped import PiolaBubbleElement + + +class GuzmanNeilanFirstKindH1(PiolaBubbleElement): + """Pk^d enriched with Guzman-Neilan bubbles.""" + def __init__(self, cell, order=1): + if Citations is not None: + Citations().register("GuzmanNeilan2018") + super().__init__(FIAT.GuzmanNeilanFirstKindH1(cell, order=order)) + + +class GuzmanNeilanSecondKindH1(PiolaBubbleElement): + """C0 Pk^d(Alfeld) enriched with Guzman-Neilan bubbles.""" + def __init__(self, cell, order=1): + if Citations is not None: + Citations().register("GuzmanNeilan2018") + super().__init__(FIAT.GuzmanNeilanSecondKindH1(cell, order=order)) + + +class GuzmanNeilanBubble(GuzmanNeilanFirstKindH1): + """Modified Bernardi-Raugel bubbles that are C^0 P_dim(Alfeld) with constant divergence.""" + def __init__(self, cell, degree=None): + super().__init__(cell, order=0) + + +class GuzmanNeilanH1div(PiolaBubbleElement): + """Alfeld-Sorokina nodally enriched with Guzman-Neilan bubbles.""" + def __init__(self, cell, degree=None): + if Citations is not None: + Citations().register("GuzmanNeilan2018") + super().__init__(FIAT.GuzmanNeilanH1div(cell, degree=degree)) diff --git a/finat/hct.py b/finat/hct.py new file mode 100644 index 000000000..f072d8efe --- /dev/null +++ b/finat/hct.py @@ -0,0 +1,101 @@ +import FIAT +from math import comb +from gem import ListTensor + +from finat.fiat_elements import ScalarFiatElement +from finat.physically_mapped import Citations, identity, PhysicallyMappedElement +from finat.argyris import _vertex_transform, _edge_transform, _normal_tangential_transform +from copy import deepcopy + + +class HsiehCloughTocher(PhysicallyMappedElement, ScalarFiatElement): + def __init__(self, cell, degree=3, avg=False): + if Citations is not None: + Citations().register("Clough1965") + if degree > 3: + Citations().register("Groselj2022") + self.avg = avg + super().__init__(FIAT.HsiehCloughTocher(cell, degree)) + + def basis_transformation(self, coordinate_mapping): + V = identity(self.space_dimension()) + + sd = self.cell.get_dimension() + top = self.cell.get_topology() + + vorder = 1 + eorder = self.degree - 3 + voffset = comb(sd + vorder, vorder) + _vertex_transform(V, vorder, self.cell, coordinate_mapping) + _edge_transform(V, vorder, eorder, self.cell, coordinate_mapping, avg=self.avg) + + # Patch up conditioning + h = coordinate_mapping.cell_size() + for v in sorted(top[0]): + s = voffset*v + 1 + V[:, s:s+sd] *= 1/h[v] + return ListTensor(V.T) + + +class ReducedHsiehCloughTocher(PhysicallyMappedElement, ScalarFiatElement): + def __init__(self, cell, degree=3): + if Citations is not None: + Citations().register("Clough1965") + super().__init__(FIAT.HsiehCloughTocher(cell, reduced=True)) + + reduced_dofs = deepcopy(self._element.entity_dofs()) + sd = cell.get_spatial_dimension() + for entity in reduced_dofs[sd-1]: + reduced_dofs[sd-1][entity] = [] + self._entity_dofs = reduced_dofs + + def basis_transformation(self, coordinate_mapping): + sd = self.cell.get_spatial_dimension() + top = self.cell.get_topology() + numbf = self._element.space_dimension() + ndof = self.space_dimension() + # rectangular to toss out the constraint dofs + V = identity(numbf, ndof) + + vorder = 1 + voffset = comb(sd + vorder, vorder) + 
_vertex_transform(V, vorder, self.cell, coordinate_mapping) + + # Jacobian at barycenter + bary, = self.cell.make_points(sd, 0, sd+1) + J = coordinate_mapping.jacobian_at(bary) + detJ = coordinate_mapping.detJ_at(bary) + for e in sorted(top[1]): + s = len(top[0]) * voffset + e + v0id, v1id = (v * voffset for v in top[1][e]) + Bnn, Bnt, Jt = _normal_tangential_transform(self.cell, J, detJ, e) + + # vertex points + V[s, v0id] = 1/5 * Bnt + V[s, v1id] = -1 * V[s, v0id] + + # vertex derivatives + for i in range(sd): + V[s, v1id+1+i] = 1/10 * Bnt * Jt[i] + V[s, v0id+1+i] = V[s, v1id+1+i] + + # Patch up conditioning + h = coordinate_mapping.cell_size() + for v in sorted(top[0]): + s = voffset * v + 1 + V[:, s:s+sd] *= 1/h[v] + return ListTensor(V.T) + + # This wipes out the edge dofs. FIAT gives a 12 DOF element + # because we need some extra functions to help with transforming + # under the edge constraint. However, we only have a 9 DOF + # element. + def entity_dofs(self): + return self._entity_dofs + + @property + def index_shape(self): + return (9,) + + def space_dimension(self): + return 9 diff --git a/finat/hdivcurl.py b/finat/hdivcurl.py new file mode 100644 index 000000000..a75c3f1fa --- /dev/null +++ b/finat/hdivcurl.py @@ -0,0 +1,225 @@ +from FIAT.hdivcurl import Hdiv, Hcurl +from FIAT.reference_element import LINE + +import gem +from gem.utils import cached_property +from finat.finiteelementbase import FiniteElementBase +from finat.tensor_product import TensorProductElement + + +class WrapperElementBase(FiniteElementBase): + """Common base class for H(div) and H(curl) element wrappers.""" + + def __init__(self, wrappee, transform): + super().__init__() + self.wrappee = wrappee + """An appropriate tensor product FInAT element whose basis + functions are mapped to produce an H(div) or H(curl) + conforming element.""" + + self.transform = transform + """A transformation applied on the scalar/vector values of the + wrapped element to produce an H(div) or H(curl) conforming + element.""" + + @property + def cell(self): + return self.wrappee.cell + + @property + def complex(self): + return self.wrappee.complex + + @property + def degree(self): + return self.wrappee.degree + + def entity_dofs(self): + return self.wrappee.entity_dofs() + + @property + def entity_permutations(self): + return self.wrappee.entity_permutations + + def entity_closure_dofs(self): + return self.wrappee.entity_closure_dofs() + + def entity_support_dofs(self): + return self.wrappee.entity_support_dofs() + + def space_dimension(self): + return self.wrappee.space_dimension() + + @property + def index_shape(self): + return self.wrappee.index_shape + + @property + def value_shape(self): + return (self.cell.get_spatial_dimension(),) + + def _transform_evaluation(self, core_eval): + beta = self.get_indices() + zeta = self.get_value_indices() + + def promote(table): + v = gem.partial_indexed(table, beta) + u = gem.ListTensor(self.transform(v)) + return gem.ComponentTensor(gem.Indexed(u, zeta), beta + zeta) + + return {alpha: promote(table) + for alpha, table in core_eval.items()} + + def basis_evaluation(self, order, ps, entity=None, coordinate_mapping=None): + core_eval = self.wrappee.basis_evaluation(order, ps, entity) + return self._transform_evaluation(core_eval) + + def point_evaluation(self, order, refcoords, entity=None): + core_eval = self.wrappee.point_evaluation(order, refcoords, entity) + return self._transform_evaluation(core_eval) + + @property + def dual_basis(self): + Q, x = self.wrappee.dual_basis + beta = 
self.get_indices() + zeta = self.get_value_indices() + # Index out the basis indices from wrapee's Q, to get + # something of wrappee.value_shape, then promote to new shape + # with the same transform as done for basis evaluation + Q = gem.ListTensor(self.transform(gem.partial_indexed(Q, beta))) + # Finally wrap up Q in shape again (now with some extra + # value_shape indices) + return gem.ComponentTensor(Q[zeta], beta + zeta), x + + +class HDivElement(WrapperElementBase): + """H(div) wrapper element for tensor product elements.""" + + def __init__(self, wrappee): + assert isinstance(wrappee, TensorProductElement) + if any(fe.formdegree is None for fe in wrappee.factors): + raise ValueError("Form degree of subelement is None, cannot H(div)!") + + formdegree = sum(fe.formdegree for fe in wrappee.factors) + if formdegree != wrappee.cell.get_spatial_dimension() - 1: + raise ValueError("H(div) requires (n-1)-form element!") + + transform = select_hdiv_transformer(wrappee) + super().__init__(wrappee, transform) + + @property + def formdegree(self): + return self.cell.get_spatial_dimension() - 1 + + @cached_property + def fiat_equivalent(self): + return Hdiv(self.wrappee.fiat_equivalent) + + @property + def mapping(self): + return "contravariant piola" + + +class HCurlElement(WrapperElementBase): + """H(curl) wrapper element for tensor product elements.""" + + def __init__(self, wrappee): + assert isinstance(wrappee, TensorProductElement) + if any(fe.formdegree is None for fe in wrappee.factors): + raise ValueError("Form degree of subelement is None, cannot H(curl)!") + + formdegree = sum(fe.formdegree for fe in wrappee.factors) + if formdegree != 1: + raise ValueError("H(curl) requires 1-form element!") + + transform = select_hcurl_transformer(wrappee) + super().__init__(wrappee, transform) + + @property + def formdegree(self): + return 1 + + @cached_property + def fiat_equivalent(self): + return Hcurl(self.wrappee.fiat_equivalent) + + @property + def mapping(self): + return "covariant piola" + + +def select_hdiv_transformer(element): + # Assume: something x interval + assert len(element.factors) == 2 + assert element.factors[1].cell.get_shape() == LINE + + # Globally consistent edge orientations of the reference + # quadrilateral: rightward horizontally, upward vertically. + # Their rotation by 90 degrees anticlockwise is interpreted as the + # positive direction for normal vectors. + ks = tuple(fe.formdegree for fe in element.factors) + if ks == (0, 1): + # Make the scalar value the leftward-pointing normal on the + # y-aligned edges. + return lambda v: [gem.Product(gem.Literal(-1), v), gem.Zero()] + elif ks == (1, 0): + # Make the scalar value the upward-pointing normal on the + # x-aligned edges. + return lambda v: [gem.Zero(), v] + elif ks == (2, 0): + # Same for 3D, so z-plane. + return lambda v: [gem.Zero(), gem.Zero(), v] + elif ks == (1, 1): + if element.mapping == "contravariant piola": + # Pad the 2-vector normal on the "base" cell into a + # 3-vector, maintaining direction. + return lambda v: [gem.Indexed(v, (0,)), + gem.Indexed(v, (1,)), + gem.Zero()] + elif element.mapping == "covariant piola": + # Rotate the 2-vector tangential component on the "base" + # cell 90 degrees anticlockwise into a 3-vector and pad. + return lambda v: [gem.Indexed(v, (1,)), + gem.Product(gem.Literal(-1), gem.Indexed(v, (0,))), + gem.Zero()] + else: + assert False, "Unexpected original mapping!" + else: + assert False, "Unexpected form degree combination!" 
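+
+# Illustrative sketch of the transformer above (not part of the module
+# logic): for a quadrilateral built as interval x interval with form
+# degrees (0, 1), the returned callable pads a scalar basis value v into
+# the 2-vector [-v, 0]:
+#
+#     transform = select_hdiv_transformer(element)  # 'element' built elsewhere
+#     transform(v)  # -> [gem.Product(gem.Literal(-1), v), gem.Zero()]
+#
+# i.e. the scalar becomes the leftward-pointing normal component on the
+# y-aligned edges, as described in the comments above.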
+ + +def select_hcurl_transformer(element): + # Assume: something x interval + assert len(element.factors) == 2 + assert element.factors[1].cell.get_shape() == LINE + + # Globally consistent edge orientations of the reference + # quadrilateral: rightward horizontally, upward vertically. + # Tangential vectors interpret these as the positive direction. + dim = element.cell.get_spatial_dimension() + ks = tuple(fe.formdegree for fe in element.factors) + if element.mapping == "affine": + if ks == (1, 0): + # Can only be 2D. Make the scalar value the + # rightward-pointing tangential on the x-aligned edges. + return lambda v: [v, gem.Zero()] + elif ks == (0, 1): + # Can be any spatial dimension. Make the scalar value the + # upward-pointing tangential. + return lambda v: [gem.Zero()] * (dim - 1) + [v] + else: + assert False + elif element.mapping == "covariant piola": + # Second factor must be continuous interval. Just padding. + return lambda v: [gem.Indexed(v, (0,)), + gem.Indexed(v, (1,)), + gem.Zero()] + elif element.mapping == "contravariant piola": + # Second factor must be continuous interval. Rotate the + # 2-vector tangential component on the "base" cell 90 degrees + # clockwise into a 3-vector and pad. + return lambda v: [gem.Product(gem.Literal(-1), gem.Indexed(v, (1,))), + gem.Indexed(v, (0,)), + gem.Zero()] + else: + assert False, "Unexpected original mapping!" diff --git a/finat/hermite.py b/finat/hermite.py new file mode 100644 index 000000000..ae54bdc1b --- /dev/null +++ b/finat/hermite.py @@ -0,0 +1,32 @@ +import FIAT +from gem import ListTensor + +from finat.fiat_elements import ScalarFiatElement +from finat.physically_mapped import Citations, identity, PhysicallyMappedElement + + +class Hermite(PhysicallyMappedElement, ScalarFiatElement): + def __init__(self, cell, degree=3): + if Citations is not None: + Citations().register("Ciarlet1972") + super().__init__(FIAT.CubicHermite(cell)) + + def basis_transformation(self, coordinate_mapping): + Js = [coordinate_mapping.jacobian_at(vertex) + for vertex in self.cell.get_vertices()] + + h = coordinate_mapping.cell_size() + + d = self.cell.get_dimension() + M = identity(self.space_dimension()) + + cur = 0 + for i in range(d+1): + cur += 1 # skip the vertex + J = Js[i] + for j in range(d): + for k in range(d): + M[cur+j, cur+k] = J[j, k] / h[i] + cur += d + + return ListTensor(M) diff --git a/finat/hz.py b/finat/hz.py new file mode 100644 index 000000000..54e4502ed --- /dev/null +++ b/finat/hz.py @@ -0,0 +1,48 @@ +"""Implementation of the Hu-Zhang finite elements.""" +import FIAT +from gem import ListTensor +from finat.fiat_elements import FiatElement +from finat.physically_mapped import Citations, identity, PhysicallyMappedElement +from finat.aw import _facet_transform, _evaluation_transform + + +class HuZhang(PhysicallyMappedElement, FiatElement): + def __init__(self, cell, degree=3, variant=None): + if Citations is not None: + Citations().register("Hu2015") + self.variant = variant + super().__init__(FIAT.HuZhang(cell, degree, variant=variant)) + + def basis_transformation(self, coordinate_mapping): + ndofs = self.space_dimension() + V = identity(ndofs) + + sd = self.cell.get_spatial_dimension() + W = _evaluation_transform(self.cell, coordinate_mapping) + + # Put into the right rows and columns. 
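+        # (In 2D a symmetric tensor has ncomp = 3 independent components, so
+        # each of the sd + 1 = 3 vertices owns one 3x3 diagonal block of V;
+        # the assignments below fill those vertex blocks with W.)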
+ V[0:3, 0:3] = V[3:6, 3:6] = V[6:9, 6:9] = W + ncomp = W.shape[0] + num_verts = sd+1 + cur = num_verts * ncomp + + Vsub = _facet_transform(self.cell, self.degree-2, coordinate_mapping) + fdofs = Vsub.shape[0] + V[cur:cur+fdofs, cur:cur+fdofs] = Vsub + cur += fdofs + + # internal DOFs + if self.variant == "point": + while cur < ndofs: + V[cur:cur+ncomp, cur:cur+ncomp] = W + cur += ncomp + + # RESCALING FOR CONDITIONING + h = coordinate_mapping.cell_size() + for e in range(num_verts): + V[:, ncomp*e:ncomp*(e+1)] *= 1/(h[e] * h[e]) + + # Note: that the edge DOFs are scaled by edge lengths in FIAT implies + # that they are already have the necessary rescaling to improve + # conditioning. + return ListTensor(V.T) diff --git a/finat/johnson_mercier.py b/finat/johnson_mercier.py new file mode 100644 index 000000000..8dcaab51e --- /dev/null +++ b/finat/johnson_mercier.py @@ -0,0 +1,29 @@ +import FIAT +from gem import ListTensor + +from finat.aw import _facet_transform +from finat.fiat_elements import FiatElement +from finat.physically_mapped import Citations, identity, PhysicallyMappedElement + + +class JohnsonMercier(PhysicallyMappedElement, FiatElement): # symmetric matrix valued + def __init__(self, cell, degree=1, variant=None): + if Citations is not None: + Citations().register("Gopalakrishnan2024") + self._indices = slice(None, None) + super().__init__(FIAT.JohnsonMercier(cell, degree, variant=variant)) + + def basis_transformation(self, coordinate_mapping): + numbf = self._element.space_dimension() + ndof = self.space_dimension() + + V = identity(numbf, ndof) + Vsub = _facet_transform(self.cell, 1, coordinate_mapping) + Vsub = Vsub[:, self._indices] + m, n = Vsub.shape + V[:m, :n] = Vsub + + # Note: that the edge DOFs are scaled by edge lengths in FIAT implies + # that they are already have the necessary rescaling to improve + # conditioning. + return ListTensor(V.T) diff --git a/finat/mixed.py b/finat/mixed.py new file mode 100644 index 000000000..e8b371c3b --- /dev/null +++ b/finat/mixed.py @@ -0,0 +1,100 @@ +import numpy + +import gem + +from finat.finiteelementbase import FiniteElementBase +from finat.enriched import EnrichedElement + + +def MixedElement(elements): + """Constructor function for FEniCS-style mixed elements. + + Implements mixed element using :py:class:`EnrichedElement` and + value shape transformations with :py:class:`MixedSubElement`. 
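+
+    For instance (an illustrative sketch), mixing a scalar element with a
+    2-vector element gives a flat value size of 3: the scalar subelement
+    sits at offset 0 and the vector subelement at offset 1, each padded
+    with zeros elsewhere by :py:class:`MixedSubElement`:
+
+    .. code-block:: python3
+
+        # 'scalar_element' and 'vector_element' are placeholder FInAT elements
+        mixed = MixedElement([scalar_element, vector_element])
+        # subelement value shapes () and (2,) -> sizes [1, 2], offsets [0, 1]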
+ """ + sizes = [numpy.prod(element.value_shape, dtype=int) + for element in elements] + offsets = [int(offset) for offset in numpy.cumsum([0] + sizes)] + total_size = offsets.pop() + return EnrichedElement([MixedSubElement(element, total_size, offset) + for offset, element in zip(offsets, elements)]) + + +class MixedSubElement(FiniteElementBase): + """Element wrapper that flattens value shape and places the flattened + vector in a longer vector of zeros.""" + + def __init__(self, element, size, offset): + assert 0 <= offset <= size + assert offset + numpy.prod(element.value_shape, dtype=int) <= size + + super().__init__() + self.element = element + self.size = size + self.offset = offset + + @property + def cell(self): + return self.element.cell + + @property + def complex(self): + return self.element.complex + + @property + def degree(self): + return self.element.degree + + @property + def formdegree(self): + return self.element.formdegree + + def entity_dofs(self): + return self.element.entity_dofs() + + def entity_closure_dofs(self): + return self.element.entity_closure_dofs() + + def entity_support_dofs(self): + return self.element.entity_support_dofs() + + def space_dimension(self): + return self.element.space_dimension() + + @property + def index_shape(self): + return self.element.index_shape + + @property + def value_shape(self): + return (self.size,) + + def _transform(self, v): + u = [gem.Zero()] * self.size + for j, zeta in enumerate(numpy.ndindex(self.element.value_shape)): + u[self.offset + j] = gem.Indexed(v, zeta) + return u + + def _transform_evaluation(self, core_eval): + beta = self.get_indices() + zeta = self.get_value_indices() + + def promote(table): + v = gem.partial_indexed(table, beta) + u = gem.ListTensor(self._transform(v)) + return gem.ComponentTensor(gem.Indexed(u, zeta), beta + zeta) + + return {alpha: promote(table) + for alpha, table in core_eval.items()} + + def basis_evaluation(self, order, ps, entity=None, coordinate_mapping=None): + core_eval = self.element.basis_evaluation(order, ps, entity, coordinate_mapping=coordinate_mapping) + return self._transform_evaluation(core_eval) + + def point_evaluation(self, order, refcoords, entity=None): + core_eval = self.element.point_evaluation(order, refcoords, entity) + return self._transform_evaluation(core_eval) + + @property + def mapping(self): + return self.element.mapping diff --git a/finat/morley.py b/finat/morley.py new file mode 100644 index 000000000..986a4471b --- /dev/null +++ b/finat/morley.py @@ -0,0 +1,46 @@ +import FIAT + +from gem import ListTensor + +from finat.fiat_elements import ScalarFiatElement +from finat.physically_mapped import Citations, identity, PhysicallyMappedElement + + +class Morley(PhysicallyMappedElement, ScalarFiatElement): + def __init__(self, cell, degree=2): + if Citations is not None: + Citations().register("Morley1971") + super().__init__(FIAT.Morley(cell)) + + def basis_transformation(self, coordinate_mapping): + # Jacobians at edge midpoints + J = coordinate_mapping.jacobian_at([1/3, 1/3]) + + rns = coordinate_mapping.reference_normals() + pns = coordinate_mapping.physical_normals() + + pts = coordinate_mapping.physical_tangents() + + pel = coordinate_mapping.physical_edge_lengths() + + V = identity(self.space_dimension()) + + for i in range(3): + V[i+3, i+3] = (rns[i, 0]*(pns[i, 0]*J[0, 0] + pns[i, 1]*J[1, 0]) + + rns[i, 1]*(pns[i, 0]*J[0, 1] + pns[i, 1]*J[1, 1])) + + for i, c in enumerate([(1, 2), (0, 2), (0, 1)]): + B12 = (rns[i, 0]*(pts[i, 0]*J[0, 0] + pts[i, 1]*J[1, 
0]) + + rns[i, 1]*(pts[i, 0]*J[0, 1] + pts[i, 1]*J[1, 1])) + V[3+i, c[0]] = -1*B12 / pel[i] + V[3+i, c[1]] = B12 / pel[i] + + # diagonal post-scaling to patch up conditioning + h = coordinate_mapping.cell_size() + + for e in range(3): + v0id, v1id = [i for i in range(3) if i != e] + for i in range(6): + V[i, 3+e] = 2*V[i, 3+e] / (h[v0id] + h[v1id]) + + return ListTensor(V.T) diff --git a/finat/mtw.py b/finat/mtw.py new file mode 100644 index 000000000..55d1a1f71 --- /dev/null +++ b/finat/mtw.py @@ -0,0 +1,50 @@ +import FIAT + +from gem import ListTensor + +from finat.fiat_elements import FiatElement +from finat.physically_mapped import Citations, identity, PhysicallyMappedElement +from finat.piola_mapped import normal_tangential_edge_transform +from copy import deepcopy + + +class MardalTaiWinther(PhysicallyMappedElement, FiatElement): + def __init__(self, cell, degree=3): + if Citations is not None: + Citations().register("Mardal2002") + super().__init__(FIAT.MardalTaiWinther(cell, degree)) + + reduced_dofs = deepcopy(self._element.entity_dofs()) + sd = cell.get_spatial_dimension() + fdofs = sd + 1 + reduced_dofs[sd][0] = [] + for f in reduced_dofs[sd-1]: + reduced_dofs[sd-1][f] = reduced_dofs[sd-1][f][:fdofs] + self._entity_dofs = reduced_dofs + self._space_dimension = fdofs * len(reduced_dofs[sd-1]) + + def basis_transformation(self, coordinate_mapping): + numbf = self._element.space_dimension() + ndof = self.space_dimension() + V = identity(numbf, ndof) + + sd = self.cell.get_spatial_dimension() + bary, = self.cell.make_points(sd, 0, sd+1) + J = coordinate_mapping.jacobian_at(bary) + detJ = coordinate_mapping.detJ_at(bary) + entity_dofs = self.entity_dofs() + for f in sorted(entity_dofs[sd-1]): + cur = entity_dofs[sd-1][f][0] + V[cur+1, cur:cur+sd] = normal_tangential_edge_transform(self.cell, J, detJ, f) + + return ListTensor(V.T) + + def entity_dofs(self): + return self._entity_dofs + + @property + def index_shape(self): + return (self._space_dimension,) + + def space_dimension(self): + return self._space_dimension diff --git a/finat/nodal_enriched.py b/finat/nodal_enriched.py new file mode 100644 index 000000000..065394a01 --- /dev/null +++ b/finat/nodal_enriched.py @@ -0,0 +1,12 @@ +import FIAT + +from finat.fiat_elements import FiatElement + + +class NodalEnrichedElement(FiatElement): + """An enriched element with a nodal basis.""" + + def __init__(self, elements): + nodal_enriched = FIAT.NodalEnrichedElement(*(elem.fiat_equivalent + for elem in elements)) + super().__init__(nodal_enriched) diff --git a/finat/physically_mapped.py b/finat/physically_mapped.py new file mode 100644 index 000000000..2ea497614 --- /dev/null +++ b/finat/physically_mapped.py @@ -0,0 +1,388 @@ +from abc import ABCMeta, abstractmethod + +import gem +import numpy + +try: + from firedrake_citations import Citations + Citations().add("Kirby2018zany", """ +@Article{Kirby2018zany, + author = {Robert C. Kirby}, + title = {A general approach to transforming finite elements}, + journal = {SMAI Journal of Computational Mathematics}, + year = 2018, + volume = 4, + pages = {197-224}, + doi = {10.5802/smai-jcm.33}, + archiveprefix ={arXiv}, + eprint = {1706.09017}, + primaryclass = {math.NA} +} +""") + Citations().add("Kirby2019zany", """ +@Article{Kirby:2019, + author = {Robert C. 
Kirby and Lawrence Mitchell}, + title = {Code generation for generally mapped finite + elements}, + journal = {ACM Transactions on Mathematical Software}, + year = 2019, + volume = 45, + number = 41, + pages = {41:1--41:23}, + doi = {10.1145/3361745}, + archiveprefix ={arXiv}, + eprint = {1808.05513}, + primaryclass = {cs.MS} +}""") + Citations().add("Clough1965", """ +@inproceedings{Clough1965, + author = {R. W. Clough, J. L. Tocher}, + title = {Finite element stiffness matrices for analysis of plate bending}, + booktitle = {Proc. of the First Conf. on Matrix Methods in Struct. Mech}, + year = 1965, + pages = {515-546}, +} +""") + Citations().add("Argyris1968", """ +@Article{Argyris1968, + author = {J. H. Argyris and I. Fried and D. W. Scharpf}, + title = {{The TUBA family of plate elements for the matrix + displacement method}}, + journal = {The Aeronautical Journal}, + year = 1968, + volume = 72, + pages = {701-709}, + doi = {10.1017/S000192400008489X} +} +""") + Citations().add("Bell1969", """ +@Article{Bell1969, + author = {Kolbein Bell}, + title = {A refined triangular plate bending finite element}, + journal = {International Journal for Numerical Methods in + Engineering}, + year = 1969, + volume = 1, + number = 1, + pages = {101-122}, + doi = {10.1002/nme.1620010108} +} +""") + Citations().add("Ciarlet1972", r""" +@Article{Ciarlet1972, + author = {P. G. Ciarlet and P. A. Raviart}, + title = {{General Lagrange and Hermite interpolation in + $\mathbb{R}^n$ with applications to finite element + methods}}, + journal = {Archive for Rational Mechanics and Analysis}, + year = 1972, + volume = 46, + number = 3, + pages = {177-199}, + doi = {10.1007/BF0025245} +} +""") + Citations().add("Morley1971", """ +@Article{Morley1971, + author = {L. S. D. Morley}, + title = {The constant-moment plate-bending element}, + journal = {The Journal of Strain Analysis for Engineering + Design}, + year = 1971, + volume = 6, + number = 1, + pages = {20-24}, + doi = {10.1243/03093247V061020} +} +""") + Citations().add("Mardal2002", """ +@article{Mardal2002, + doi = {10.1137/s0036142901383910}, + year = 2002, + volume = {40}, + number = {5}, + pages = {1605--1631}, + author = {Mardal, K.-A.~ and Tai, X.-C.~ and Winther, R.~}, + title = {A robust finite element method for {Darcy--Stokes} flow}, + journal = {{SIAM} Journal on Numerical Analysis} +} +""") + Citations().add("Arnold2002", """ +@article{Arnold2002, + doi = {10.1007/s002110100348}, + year = 2002, + volume = {92}, + number = {3}, + pages = {401--419}, + author = {Arnold, R.~N.~ and Winther, R.~}, + title = {Mixed finite elements for elasticity}, + journal = {Numerische Mathematik} +} +""") + Citations().add("Arnold2003", """ +@article{arnold2003, + doi = {10.1142/s0218202503002507}, + year = 2003, + volume = {13}, + number = {03}, + pages = {295--307}, + author = {Arnold, D.~N.~ and Winther, R.~}, + title = {Nonconforming mixed elements for elasticity}, + journal = {Mathematical Models and Methods in Applied Sciences} +} +""") + Citations().add("Hu2015", """ +@article{Hu2015, + author = {Hu, J.~ and Zhang, S.~}, + title = {A family of conforming mixed finite elements for linear elasticity on triangular grids}, + year = {2015}, + month = jan, + archiveprefix = {arXiv}, + eprint = {1406.7457}, +} +""") + Citations().add("Arbogast2017", """ +@techreport{Arbogast2017, + title={Direct serendipity finite elements on convex quadrilaterals}, + author={Arbogast, T and Tao, Z}, + year={2017}, + institution={Tech. Rep. 
ICES REPORT 17-28, Institute for Computational Engineering and Sciences} +} +""") + Citations().add("Gopalakrishnan2024", """ +@article{gopalakrishnan2024johnson, + title={{The Johnson-Mercier elasticity element in any dimensions}}, + author={Gopalakrishnan, J and Guzman, J and Lee, J J}, + journal={arXiv preprint arXiv:2403.13189}, + year={2024} +} +""") + Citations().add("Groselj2022", """ +@article{groselj2022generalized, + title={{Generalized C1 Clough--Tocher splines for CAGD and FEM}}, + author={Gro{\v{s}}elj, Jan and Knez, Marjeta}, + journal={Computer Methods in Applied Mechanics and Engineering}, + volume={395}, + pages={114983}, + year={2022}, + publisher={Elsevier} +} +""") + Citations().add("PowellSabin1977", """ +@article{powell1977piecewise, + title={Piecewise quadratic approximations on triangles}, + author={Powell, Michael JD and Sabin, Malcolm A}, + journal={ACM Transactions on Mathematical Software}, + volume={3}, + number={4}, + pages={316--325}, + year={1977}, + publisher={ACM New York, NY, USA} +} +""") + Citations().add("AlfeldSorokina2016", """ +@article{alfeld2016linear, + title={Linear differential operators on bivariate spline spaces and spline vector fields}, + author={Alfeld, Peter and Sorokina, Tatyana}, + journal={BIT Numerical Mathematics}, + volume={56}, + number={1}, + pages={15--32}, + year={2016}, + publisher={Springer} +} +""") + Citations().add("ArnoldQin1992", """ +@article{arnold1992quadratic, + title={{Quadratic velocity/linear pressure Stokes elements}}, + author={Arnold, Douglas N and Qin, Jinshui}, + journal={Advances in computer methods for partial differential equations}, + volume={7}, + pages={28--34}, + year={1992} +} +""") + Citations().add("ChristiansenHu2019", """ +@article{christiansen2019finite, + title={A finite element for Stokes with a commuting diagram }, + author={Christiansen, Snorre H and Hu, Kaibo}, + journal={Mathematical Analysis in Fluid and Gas Dynamics}, + volume={2107}, + pages={172--183}, + year={2019} +} +""") + Citations().add("GuzmanNeilan2018", """ +@article{guzman2018infsup, + author = {Guzm\'{a}n, Johnny and Neilan, Michael}, + title = {{Inf-Sup Stable Finite Elements on Barycentric Refinements Producing Divergence--Free Approximations in Arbitrary Dimensions}}, + journal = {SIAM Journal on Numerical Analysis}, + volume = {56}, + number = {5}, + pages = {2826-2844}, + year = {2018}, + doi = {10.1137/17M1153467} +} +""") + Citations().add("BernardiRaugel1985", """ +@article{bernardi-raugel-0, + AUTHOR = {Bernardi, Christine and Raugel, Genevi\\`eve}, + TITLE = {Analysis of some finite elements for the {Stokes} problem}, + JOURNAL = {Mathematics of Computation}, + VOLUME = {44}, + YEAR = {1985}, + DOI = {10.1090/S0025-5718-1985-0771031-7}, + PAGES = {{71--79}} +} +""") + + +except ImportError: + Citations = None + + +class NeedsCoordinateMappingElement(metaclass=ABCMeta): + """Abstract class for elements that require physical information + either to map or construct their basis functions.""" + pass + + +class PhysicallyMappedElement(NeedsCoordinateMappingElement): + """A mixin that applies a "physical" transformation to tabulated + basis functions.""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if Citations is not None: + Citations().register("Kirby2018zany") + Citations().register("Kirby2019zany") + + @abstractmethod + def basis_transformation(self, coordinate_mapping): + """Transformation matrix for the basis functions. 
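+
+        Rows of the matrix index the physically mapped basis functions and
+        columns index the reference (FIAT) basis functions, so that,
+        roughly, phi_physical[i] = sum_j M[i, j] * phi_reference[j]; this is
+        how ``basis_evaluation`` below applies it.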
+ + :arg coordinate_mapping: Object providing physical geometry.""" + pass + + def basis_evaluation(self, order, ps, entity=None, coordinate_mapping=None): + assert coordinate_mapping is not None + + M = self.basis_transformation(coordinate_mapping) + M, = gem.optimise.constant_fold_zero((M,)) + + def matvec(table): + table, = gem.optimise.constant_fold_zero((table,)) + i, j = gem.indices(2) + value_indices = self.get_value_indices() + table = gem.Indexed(table, (j, ) + value_indices) + val = gem.ComponentTensor(gem.IndexSum(M[i, j]*table, (j,)), (i,) + value_indices) + # Eliminate zeros + return gem.optimise.aggressive_unroll(val) + + result = super().basis_evaluation(order, ps, entity=entity) + + return {alpha: matvec(table) + for alpha, table in result.items()} + + def point_evaluation(self, order, refcoords, entity=None): + raise NotImplementedError("TODO: not yet thought about it") + + +class DirectlyDefinedElement(NeedsCoordinateMappingElement): + """Base class for directly defined elements such as direct + serendipity that bypass a coordinate mapping.""" + pass + + +class PhysicalGeometry(metaclass=ABCMeta): + + @abstractmethod + def cell_size(self): + """The cell size at each vertex. + + :returns: A GEM expression for the cell size, shape (nvertex, ). + """ + + @abstractmethod + def jacobian_at(self, point): + """The jacobian of the physical coordinates at a point. + + :arg point: The point in reference space (on the cell) to + evaluate the Jacobian. + :returns: A GEM expression for the Jacobian, shape (gdim, tdim). + """ + + @abstractmethod + def detJ_at(self, point): + """The determinant of the jacobian of the physical coordinates at a point. + + :arg point: The point in reference space to evaluate the Jacobian determinant. + :returns: A GEM expression for the Jacobian determinant. + """ + + @abstractmethod + def reference_normals(self): + """The (unit) reference cell normals for each facet. + + :returns: A GEM expression for the normal to each + facet (numbered according to FIAT conventions), shape + (nfacet, tdim). + """ + + @abstractmethod + def physical_normals(self): + """The (unit) physical cell normals for each facet. + + :returns: A GEM expression for the normal to each + facet (numbered according to FIAT conventions). These are + all computed by a clockwise rotation of the physical + tangents, shape (nfacet, gdim). + """ + + @abstractmethod + def physical_tangents(self): + """The (unit) physical cell tangents on each facet. + + :returns: A GEM expression for the tangent to each + facet (numbered according to FIAT conventions). These + always point from low to high numbered local vertex, shape + (nfacet, gdim). + """ + + @abstractmethod + def physical_edge_lengths(self): + """The length of each edge of the physical cell. + + :returns: A GEM expression for the length of each + edge (numbered according to FIAT conventions), shape + (nfacet, ). + """ + + @abstractmethod + def physical_points(self, point_set, entity=None): + """Maps reference element points to GEM for the physical coordinates + + :arg point_set: A point_set on the reference cell to push forward to physical space. + :arg entity: Reference cell entity on which the point set is + defined (for example if it is a point set on a facet). + :returns: a GEM expression for the physical locations of the + points, shape (gdim, ) with free indices of the point_set. + """ + + @abstractmethod + def physical_vertices(self): + """Physical locations of the cell vertices. 
+ + :returns: a GEM expression for the physical vertices, shape + (gdim, ).""" + + +zero = gem.Zero() +one = gem.Literal(1.0) + + +def identity(*shape): + V = numpy.eye(*shape, dtype=object) + for multiindex in numpy.ndindex(V.shape): + V[multiindex] = zero if V[multiindex] == 0 else one + return V diff --git a/finat/piola_mapped.py b/finat/piola_mapped.py new file mode 100644 index 000000000..18f51e99e --- /dev/null +++ b/finat/piola_mapped.py @@ -0,0 +1,163 @@ +import numpy + +from finat.fiat_elements import FiatElement +from finat.physically_mapped import identity, PhysicallyMappedElement +from gem import Literal, ListTensor +from copy import deepcopy + + +def determinant(A): + """Return the determinant of A""" + n = A.shape[0] + if n == 0: + return 1 + elif n == 1: + return A[0, 0] + elif n == 2: + return A[0, 0] * A[1, 1] - A[0, 1] * A[1, 0] + else: + detA = A[0, 0] * determinant(A[1:, 1:]) + cols = numpy.ones(A.shape[1], dtype=bool) + for j in range(1, n): + cols[j] = False + detA += (-1)**j * A[0, j] * determinant(A[1:][:, cols]) + cols[j] = True + return detA + + +def adjugate(A): + """Return the adjugate matrix of A""" + A = numpy.asarray(A) + C = numpy.zeros_like(A) + rows = numpy.ones(A.shape[0], dtype=bool) + cols = numpy.ones(A.shape[1], dtype=bool) + for i in range(A.shape[0]): + rows[i] = False + for j in range(A.shape[1]): + cols[j] = False + C[j, i] = (-1)**(i+j)*determinant(A[rows, :][:, cols]) + cols[j] = True + rows[i] = True + return C + + +def piola_inverse(fiat_cell, J, detJ): + """Return the basis transformation of evaluation at a point. + This simply inverts the Piola transform inv(J / detJ) = adj(J).""" + sd = fiat_cell.get_spatial_dimension() + Jnp = numpy.array([[J[i, j] for j in range(sd)] for i in range(sd)]) + return adjugate(Jnp) + + +def normal_tangential_edge_transform(fiat_cell, J, detJ, f): + """Return the basis transformation of + normal and tangential edge moments""" + R = numpy.array([[0, 1], [-1, 0]]) + that = fiat_cell.compute_edge_tangent(f) + that /= numpy.linalg.norm(that) + nhat = R @ that + Jn = J @ Literal(nhat) + Jt = J @ Literal(that) + alpha = Jn @ Jt + beta = Jt @ Jt + # Compute the last row of inv([[1, 0], [alpha/detJ, beta/detJ]]) + row = (-1 * alpha / beta, detJ / beta) + return row + + +def normal_tangential_face_transform(fiat_cell, J, detJ, f): + """Return the basis transformation of + normal and tangential face moments""" + # Compute the reciprocal basis + thats = fiat_cell.compute_tangents(2, f) + nhat = numpy.cross(*thats) + nhat /= numpy.dot(nhat, nhat) + orth_vecs = numpy.array([nhat, + numpy.cross(nhat, thats[1]), + numpy.cross(thats[0], nhat)]) + # Compute A = (alpha, beta, gamma) + Jts = J @ Literal(thats.T) + Jorths = J @ Literal(orth_vecs.T) + A = Jorths.T @ Jts + # Compute the last two rows of inv([[1, 0, 0], A.T/detJ]) + det0 = A[1, 0] * A[2, 1] - A[1, 1] * A[2, 0] + det1 = A[2, 0] * A[0, 1] - A[2, 1] * A[0, 0] + det2 = A[0, 0] * A[1, 1] - A[0, 1] * A[1, 0] + scale = detJ / det0 + rows = ((-1 * det1 / det0, -1 * scale * A[2, 1], scale * A[2, 0]), + (-1 * det2 / det0, scale * A[1, 1], -1 * scale * A[1, 0])) + return rows + + +class PiolaBubbleElement(PhysicallyMappedElement, FiatElement): + """A general class to transform Piola-mapped elements with normal facet bubbles.""" + def __init__(self, fiat_element): + mapping, = set(fiat_element.mapping()) + if mapping != "contravariant piola": + raise ValueError(f"{type(fiat_element).__name__} needs to be Piola mapped.") + super().__init__(fiat_element) + + # On each facet we 
expect the normal dof followed by the tangential ones + # The tangential dofs should be numbered last, and are constrained to be zero + sd = self.cell.get_spatial_dimension() + reduced_dofs = deepcopy(self._element.entity_dofs()) + reduced_dim = 0 + cur = reduced_dofs[sd-1][0][0] + for entity in sorted(reduced_dofs[sd-1]): + reduced_dim += len(reduced_dofs[sd-1][entity][1:]) + reduced_dofs[sd-1][entity] = [cur] + cur += 1 + self._entity_dofs = reduced_dofs + self._space_dimension = fiat_element.space_dimension() - reduced_dim + + def entity_dofs(self): + return self._entity_dofs + + @property + def index_shape(self): + return (self._space_dimension,) + + def space_dimension(self): + return self._space_dimension + + def basis_transformation(self, coordinate_mapping): + sd = self.cell.get_spatial_dimension() + bary, = self.cell.make_points(sd, 0, sd+1) + J = coordinate_mapping.jacobian_at(bary) + detJ = coordinate_mapping.detJ_at(bary) + + dofs = self.entity_dofs() + bfs = self._element.entity_dofs() + ndof = self.space_dimension() + numbf = self._element.space_dimension() + V = identity(numbf, ndof) + + # Undo the Piola transform for non-facet bubble basis functions + nodes = self._element.get_dual_set().nodes + Finv = piola_inverse(self.cell, J, detJ) + for dim in dofs: + if dim == sd-1: + continue + for e in sorted(dofs[dim]): + k = 0 + while k < len(dofs[dim][e]): + cur = dofs[dim][e][k] + if len(nodes[cur].deriv_dict) > 0: + V[cur, cur] = detJ + k += 1 + else: + s = dofs[dim][e][k:k+sd] + V[numpy.ix_(s, s)] = Finv + k += sd + # Unpick the normal component for the facet bubbles + if sd == 2: + transform = normal_tangential_edge_transform + elif sd == 3: + transform = normal_tangential_face_transform + + for f in sorted(dofs[sd-1]): + rows = numpy.asarray(transform(self.cell, J, detJ, f)) + cur_dofs = dofs[sd-1][f] + cur_bfs = bfs[sd-1][f][1:] + V[numpy.ix_(cur_bfs, cur_dofs)] = rows[..., :len(cur_dofs)] + return ListTensor(V.T) diff --git a/finat/point_set.py b/finat/point_set.py new file mode 100644 index 000000000..1497308c7 --- /dev/null +++ b/finat/point_set.py @@ -0,0 +1,203 @@ +from abc import ABCMeta, abstractproperty +from itertools import chain, product + +import numpy + +import gem +from gem.utils import cached_property + + +class AbstractPointSet(metaclass=ABCMeta): + """A way of specifying a known set of points, perhaps with some + (tensor) structure. + + Points, when stored, have shape point_set_shape + (point_dimension,) + where point_set_shape is () for scalar, (N,) for N element vector, + (N, M) for N x M matrix etc. + """ + + @abstractproperty + def points(self): + """A flattened numpy array of points or ``UnknownPointsArray`` + object with shape (# of points, point dimension).""" + + @property + def dimension(self): + """Point dimension.""" + _, dim = self.points.shape + return dim + + @abstractproperty + def indices(self): + """GEM indices with matching shape and extent to the structure of the + point set.""" + + @abstractproperty + def expression(self): + """GEM expression describing the points, with free indices + ``self.indices`` and shape (point dimension,).""" + + +class PointSingleton(AbstractPointSet): + """A point set representing a single point. + + These have a ``gem.Literal`` expression and no indices.""" + + def __init__(self, point): + """Build a PointSingleton from a single point. 
+ + :arg point: A single point of shape (D,) where D is the dimension of + the point.""" + point = numpy.asarray(point) + # 1 point ought to be a 1D array - see docstring above and points method + assert len(point.shape) == 1 + self.point = point + + @cached_property + def points(self): + # Make sure we conform to the expected (# of points, point dimension) + # shape + return self.point.reshape(1, -1) + + indices = () + + @cached_property + def expression(self): + return gem.Literal(self.point) + + +class UnknownPointsArray(): + """A placeholder for a set of unknown points with appropriate length + and size but without indexable values. For use with + :class:`AbstractPointSet`s whose points are not known at compile + time.""" + def __init__(self, shape): + """ + :arg shape: The shape of the unknown set of N points of shape + (N, D) where D is the dimension of each point. + """ + assert len(shape) == 2 + self.shape = shape + + def __len__(self): + return self.shape[0] + + +class UnknownPointSet(AbstractPointSet): + """A point set representing a vector of points with unknown + locations but known ``gem.Variable`` expression. + + The ``.points`` property is an `UnknownPointsArray` object with + shape (N, D) where N is the number of points and D is their + dimension. + + The ``.expression`` property is a derived `gem.partial_indexed` with + shape (D,) and free indices for the points N.""" + + def __init__(self, points_expr): + r"""Build a PointSingleton from a gem expression for a single point. + + :arg points_expr: A ``gem.Variable`` expression representing a + vector of N points in D dimensions. Should have shape (N, D) + and no free indices. For runtime tabulation the variable + name should begin with \'rt_:\'.""" + assert isinstance(points_expr, gem.Variable) + assert points_expr.free_indices == () + assert len(points_expr.shape) == 2 + self._points_expr = points_expr + + @cached_property + def points(self): + return UnknownPointsArray(self._points_expr.shape) + + @cached_property + def indices(self): + N, _ = self._points_expr.shape + return (gem.Index(extent=N),) + + @cached_property + def expression(self): + return gem.partial_indexed(self._points_expr, self.indices) + + +class PointSet(AbstractPointSet): + """A basic point set with no internal structure representing a vector of + points.""" + + def __init__(self, points): + """Build a PointSet from a vector of points + + :arg points: A vector of N points of shape (N, D) where D is the + dimension of each point.""" + points = numpy.asarray(points) + assert len(points.shape) == 2 + self.points = points + + @cached_property + def points(self): + pass # set at initialisation + + @cached_property + def indices(self): + return (gem.Index(extent=len(self.points)),) + + @cached_property + def expression(self): + return gem.partial_indexed(gem.Literal(self.points), self.indices) + + def almost_equal(self, other, tolerance=1e-12): + """Approximate numerical equality of point sets""" + return type(self) is type(other) and \ + self.points.shape == other.points.shape and \ + numpy.allclose(self.points, other.points, rtol=0, atol=tolerance) + + +class GaussLegendrePointSet(PointSet): + """Gauss-Legendre quadrature points on the interval. + + This facilitates implementing discontinuous spectral elements. + """ + def __init__(self, points): + super().__init__(points) + assert self.points.shape[1] == 1 + + +class GaussLobattoLegendrePointSet(PointSet): + """Gauss-Lobatto-Legendre quadrature points on the interval. 
+ + This facilitates implementing continuous spectral elements. + """ + def __init__(self, points): + super().__init__(points) + assert self.points.shape[1] == 1 + + +class TensorPointSet(AbstractPointSet): + + def __init__(self, factors): + self.factors = tuple(factors) + + @cached_property + def points(self): + return numpy.array([list(chain(*pt_tuple)) + for pt_tuple in product(*[ps.points + for ps in self.factors])]) + + @cached_property + def indices(self): + return tuple(chain(*[ps.indices for ps in self.factors])) + + @cached_property + def expression(self): + result = [] + for point_set in self.factors: + for i in range(point_set.dimension): + result.append(gem.Indexed(point_set.expression, (i,))) + return gem.ListTensor(result) + + def almost_equal(self, other, tolerance=1e-12): + """Approximate numerical equality of point sets""" + return type(self) is type(other) and \ + len(self.factors) == len(other.factors) and \ + all(s.almost_equal(o, tolerance=tolerance) + for s, o in zip(self.factors, other.factors)) diff --git a/finat/powell_sabin.py b/finat/powell_sabin.py new file mode 100644 index 000000000..d82e7f3f5 --- /dev/null +++ b/finat/powell_sabin.py @@ -0,0 +1,64 @@ +import FIAT +from gem import ListTensor + +from finat.argyris import _edge_transform +from finat.fiat_elements import ScalarFiatElement +from finat.physically_mapped import Citations, identity, PhysicallyMappedElement + + +class QuadraticPowellSabin6(PhysicallyMappedElement, ScalarFiatElement): + def __init__(self, cell, degree=2): + if Citations is not None: + Citations().register("PowellSabin1977") + super().__init__(FIAT.QuadraticPowellSabin6(cell)) + + def basis_transformation(self, coordinate_mapping): + Js = [coordinate_mapping.jacobian_at(vertex) + for vertex in self.cell.get_vertices()] + + h = coordinate_mapping.cell_size() + + d = self.cell.get_dimension() + M = identity(self.space_dimension()) + + cur = 0 + for i in range(d+1): + cur += 1 # skip the vertex + J = Js[i] + for j in range(d): + for k in range(d): + M[cur+j, cur+k] = J[j, k] / h[i] + cur += d + + return ListTensor(M) + + +class QuadraticPowellSabin12(PhysicallyMappedElement, ScalarFiatElement): + def __init__(self, cell, degree=2, avg=False): + self.avg = avg + if Citations is not None: + Citations().register("PowellSabin1977") + super().__init__(FIAT.QuadraticPowellSabin12(cell)) + + def basis_transformation(self, coordinate_mapping): + J = coordinate_mapping.jacobian_at([1/3, 1/3]) + + V = identity(self.space_dimension()) + + sd = self.cell.get_dimension() + top = self.cell.get_topology() + voffset = sd + 1 + for v in sorted(top[0]): + s = voffset * v + for i in range(sd): + for j in range(sd): + V[s+1+i, s+1+j] = J[j, i] + + _edge_transform(V, 1, 0, self.cell, coordinate_mapping, avg=self.avg) + + # Patch up conditioning + h = coordinate_mapping.cell_size() + for v in sorted(top[0]): + for k in range(sd): + V[:, voffset*v+1+k] /= h[v] + return ListTensor(V.T) diff --git a/finat/quadrature.py b/finat/quadrature.py new file mode 100644 index 000000000..ec6def127 --- /dev/null +++ b/finat/quadrature.py @@ -0,0 +1,140 @@ +from abc import ABCMeta, abstractproperty +from functools import reduce + +import gem +import numpy +from FIAT.quadrature import GaussLegendreQuadratureLineRule +from FIAT.quadrature_schemes import create_quadrature as fiat_scheme +from FIAT.reference_element import LINE, QUADRILATERAL, TENSORPRODUCT +from gem.utils import cached_property + +from finat.point_set import GaussLegendrePointSet, PointSet, TensorPointSet + + 
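+# Editor's usage sketch (illustrative only, not part of the original patch):
+# make_quadrature below is what the rest of this patch calls (see
+# finat/quadrature_element.py); the rule it returns pairs a PointSet with a
+# GEM weight expression, roughly:
+#
+#     import FIAT
+#     from finat.quadrature import make_quadrature
+#     rule = make_quadrature(FIAT.ufc_simplex(2), 3)  # reference triangle, degree 3
+#     ps = rule.point_set              # PointSet of the quadrature points
+#     w = rule.weight_expression       # gem.Indexed with ps.indices free
+#
+# FIAT.ufc_simplex is assumed to be importable as spelled here; everything
+# else in the sketch is defined in this module.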
+def make_quadrature(ref_el, degree, scheme="default"): + """ + Generate quadrature rule for given reference element + that will integrate an polynomial of order 'degree' exactly. + + For low-degree (<=6) polynomials on triangles and tetrahedra, this + uses hard-coded rules, otherwise it falls back to a collapsed + Gauss scheme on simplices. On tensor-product cells, it is a + tensor-product quadrature rule of the subcells. + + :arg ref_el: The FIAT cell to create the quadrature for. + :arg degree: The degree of polynomial that the rule should + integrate exactly. + """ + if ref_el.get_shape() == TENSORPRODUCT: + try: + degree = tuple(degree) + except TypeError: + degree = (degree,) * len(ref_el.cells) + + assert len(ref_el.cells) == len(degree) + quad_rules = [make_quadrature(c, d, scheme) + for c, d in zip(ref_el.cells, degree)] + return TensorProductQuadratureRule(quad_rules, ref_el=ref_el) + + if ref_el.get_shape() == QUADRILATERAL: + return make_quadrature(ref_el.product, degree, scheme) + + if degree < 0: + raise ValueError("Need positive degree, not %d" % degree) + + if ref_el.get_shape() == LINE and not ref_el.is_macrocell(): + # FIAT uses Gauss-Legendre line quadature, however, since we + # symbolically label it as such, we wish not to risk attaching + # the wrong label in case FIAT changes. So we explicitly ask + # for Gauss-Legendre line quadature. + num_points = (degree + 1 + 1) // 2 # exact integration + fiat_rule = GaussLegendreQuadratureLineRule(ref_el, num_points) + point_set = GaussLegendrePointSet(fiat_rule.get_points()) + return QuadratureRule(point_set, fiat_rule.get_weights(), ref_el=ref_el, io_ornt_map_tuple=fiat_rule._intrinsic_orientation_permutation_map_tuple) + + fiat_rule = fiat_scheme(ref_el, degree, scheme) + return QuadratureRule(PointSet(fiat_rule.get_points()), fiat_rule.get_weights(), ref_el=ref_el, io_ornt_map_tuple=fiat_rule._intrinsic_orientation_permutation_map_tuple) + + +class AbstractQuadratureRule(metaclass=ABCMeta): + """Abstract class representing a quadrature rule as point set and a + corresponding set of weights.""" + + @abstractproperty + def point_set(self): + """Point set object representing the quadrature points.""" + + @abstractproperty + def weight_expression(self): + """GEM expression describing the weights, with the same free indices + as the point set.""" + + @cached_property + def extrinsic_orientation_permutation_map(self): + """A map from extrinsic orientations to corresponding axis permutation matrices. + + Notes + ----- + result[eo] gives the physical axis-reference axis permutation matrix corresponding to + eo (extrinsic orientation). + + """ + if self.ref_el is None: + raise ValueError("Must set ref_el") + return self.ref_el.extrinsic_orientation_permutation_map + + @cached_property + def intrinsic_orientation_permutation_map_tuple(self): + """A tuple of maps from intrinsic orientations to corresponding point permutations for each reference cell axis. + + Notes + ----- + result[axis][io] gives the physical point-reference point permutation array corresponding to + io (intrinsic orientation) on ``axis``. 
+ + """ + if any(m is None for m in self._intrinsic_orientation_permutation_map_tuple): + raise ValueError("Must set _intrinsic_orientation_permutation_map_tuple") + return self._intrinsic_orientation_permutation_map_tuple + + +class QuadratureRule(AbstractQuadratureRule): + """Generic quadrature rule with no internal structure.""" + + def __init__(self, point_set, weights, ref_el=None, io_ornt_map_tuple=(None, )): + weights = numpy.asarray(weights) + assert len(point_set.points) == len(weights) + + self.ref_el = ref_el + self.point_set = point_set + self.weights = numpy.asarray(weights) + self._intrinsic_orientation_permutation_map_tuple = io_ornt_map_tuple + + @cached_property + def point_set(self): + pass # set at initialisation + + @cached_property + def weight_expression(self): + return gem.Indexed(gem.Literal(self.weights), self.point_set.indices) + + +class TensorProductQuadratureRule(AbstractQuadratureRule): + """Quadrature rule which is a tensor product of other rules.""" + + def __init__(self, factors, ref_el=None): + self.ref_el = ref_el + self.factors = tuple(factors) + self._intrinsic_orientation_permutation_map_tuple = tuple( + m + for factor in factors + for m in factor._intrinsic_orientation_permutation_map_tuple + ) + + @cached_property + def point_set(self): + return TensorPointSet(q.point_set for q in self.factors) + + @cached_property + def weight_expression(self): + return reduce(gem.Product, (q.weight_expression for q in self.factors)) diff --git a/finat/quadrature_element.py b/finat/quadrature_element.py new file mode 100644 index 000000000..3f17ec399 --- /dev/null +++ b/finat/quadrature_element.py @@ -0,0 +1,143 @@ +from finat.point_set import UnknownPointSet +from functools import reduce + +import numpy + +import FIAT + +import gem +from gem.interpreter import evaluate +from gem.utils import cached_property + +from finat.finiteelementbase import FiniteElementBase +from finat.quadrature import make_quadrature, AbstractQuadratureRule + + +def make_quadrature_element(fiat_ref_cell, degree, scheme="default"): + """Construct a :class:`QuadratureElement` from a given a reference + element, degree and scheme. + + :param fiat_ref_cell: The FIAT reference cell to build the + :class:`QuadratureElement` on. + :param degree: The degree of polynomial that the rule should + integrate exactly. + :param scheme: The quadrature scheme to use - e.g. "default", + "canonical" or "KMV". + :returns: The appropriate :class:`QuadratureElement` + """ + rule = make_quadrature(fiat_ref_cell, degree, scheme=scheme) + return QuadratureElement(fiat_ref_cell, rule) + + +class QuadratureElement(FiniteElementBase): + """A set of quadrature points pretending to be a finite element.""" + + def __init__(self, fiat_ref_cell, rule): + """Construct a :class:`QuadratureElement`. 
+ + :param fiat_ref_cell: The FIAT reference cell to build the + :class:`QuadratureElement` on + :param rule: A :class:`AbstractQuadratureRule` to use + """ + self.cell = fiat_ref_cell + if not isinstance(rule, AbstractQuadratureRule): + raise TypeError("rule is not an AbstractQuadratureRule") + if fiat_ref_cell.get_spatial_dimension() != rule.point_set.dimension: + raise ValueError("Cell dimension does not match rule's point set dimension") + self._rule = rule + + @cached_property + def cell(self): + pass # set at initialisation + + @property + def complex(self): + return self.cell + + @property + def degree(self): + raise NotImplementedError("QuadratureElement does not represent a polynomial space.") + + @property + def formdegree(self): + return None + + @cached_property + def _entity_dofs(self): + # Inspired by ffc/quadratureelement.py + entity_dofs = {dim: {entity: [] for entity in entities} + for dim, entities in self.cell.get_topology().items()} + entity_dofs[self.cell.get_dimension()] = {0: list(range(self.space_dimension()))} + return entity_dofs + + def entity_dofs(self): + return self._entity_dofs + + def space_dimension(self): + return numpy.prod(self.index_shape, dtype=int) + + @property + def index_shape(self): + ps = self._rule.point_set + return tuple(index.extent for index in ps.indices) + + @property + def value_shape(self): + return () + + @cached_property + def fiat_equivalent(self): + ps = self._rule.point_set + if isinstance(ps, UnknownPointSet): + raise ValueError("A quadrature element with rule with runtime points has no fiat equivalent!") + weights = getattr(self._rule, 'weights', None) + if weights is None: + # we need the weights. + weights, = evaluate([self._rule.weight_expression]) + weights = weights.arr.flatten() + self._rule.weights = weights + + return FIAT.QuadratureElement(self.cell, ps.points, weights) + + def basis_evaluation(self, order, ps, entity=None, coordinate_mapping=None): + '''Return code for evaluating the element at known points on the + reference element. + + :param order: return derivatives up to this order. + :param ps: the point set object. + :param entity: the cell entity on which to tabulate. + ''' + if entity is not None and entity != (self.cell.get_dimension(), 0): + raise ValueError('QuadratureElement does not "tabulate" on subentities.') + + if order: + raise ValueError("Derivatives are not defined on a QuadratureElement.") + + if not self._rule.point_set.almost_equal(ps): + raise ValueError("Mismatch of quadrature points!") + + # Return an outer product of identity matrices + multiindex = self.get_indices() + product = reduce(gem.Product, [gem.Delta(q, r) + for q, r in zip(ps.indices, multiindex)]) + + dim = self.cell.get_spatial_dimension() + return {(0,) * dim: gem.ComponentTensor(product, multiindex)} + + def point_evaluation(self, order, refcoords, entity=None): + raise NotImplementedError("QuadratureElement cannot do point evaluation!") + + @property + def dual_basis(self): + ps = self._rule.point_set + multiindex = self.get_indices() + # Evaluation matrix is just an outer product of identity + # matrices, evaluation points are just the quadrature points. 
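+        # Concretely, Q[r] = prod_k Delta(q_k, r_k) with the q_k left free:
+        # the r-th dual functional is point evaluation at the r-th quadrature point.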
+ Q = reduce(gem.Product, (gem.Delta(q, r) + for q, r in zip(ps.indices, multiindex))) + Q = gem.ComponentTensor(Q, multiindex) + return Q, ps + + @property + def mapping(self): + return "affine" diff --git a/finat/restricted.py b/finat/restricted.py new file mode 100644 index 000000000..cbd56b38f --- /dev/null +++ b/finat/restricted.py @@ -0,0 +1,247 @@ +from functools import singledispatch +from itertools import chain + +import FIAT +from FIAT.polynomial_set import mis + +import finat +from finat.fiat_elements import FiatElement +from finat.physically_mapped import PhysicallyMappedElement + + +# Sentinel for when restricted element is empty +null_element = object() + + +@singledispatch +def restrict(element, domain, take_closure): + """Restrict an element to a given subentity. + + :arg element: The element to restrict. + :arg domain: The subentity to restrict to. + :arg take_closure: Gather dofs in closure of the subentities? + Ignored for "interior" domain. + + :raises NotImplementedError: If we don't know how to restrict this + element. + :raises ValueError: If the restricted element is empty. + :returns: A new finat element.""" + return NotImplementedError(f"Don't know how to restrict element of type {type(element)}") + + +@restrict.register(FiatElement) +def restrict_fiat(element, domain, take_closure): + try: + return FiatElement(FIAT.RestrictedElement(element._element, + restriction_domain=domain, take_closure=take_closure)) + except ValueError: + return null_element + + +@restrict.register(PhysicallyMappedElement) +def restrict_physically_mapped(element, domain, take_closure): + raise NotImplementedError("Can't restrict Physically Mapped things") + + +@restrict.register(finat.FlattenedDimensions) +def restrict_flattened_dimensions(element, domain, take_closure): + restricted = restrict(element.product, domain, take_closure) + if restricted is null_element: + return null_element + else: + return finat.FlattenedDimensions(restricted) + + +@restrict.register(finat.DiscontinuousElement) +@restrict.register(finat.DiscontinuousLagrange) +@restrict.register(finat.Legendre) +def restrict_discontinuous(element, domain, take_closure): + if domain == "interior": + return element + else: + return null_element + + +@restrict.register(finat.EnrichedElement) +def restrict_enriched(element, domain, take_closure): + if all(isinstance(e, finat.mixed.MixedSubElement) for e in element.elements): + # Mixed is handled by Enriched + MixedSubElement, we must + # restrict the subelements here because the transformation is + # nonlocal. 
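+        # Each MixedSubElement wraps its component in ``.element``; restrict
+        # those components and rebuild a MixedElement from whatever survives.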
+ elements = tuple(restrict(e.element, domain, take_closure) for + e in element.elements) + reconstruct = finat.mixed.MixedElement + elif not any(isinstance(e, finat.mixed.MixedSubElement) for e in element.elements): + elements = tuple(restrict(e, domain, take_closure) + for e in element.elements) + reconstruct = finat.EnrichedElement + else: + raise NotImplementedError("Not expecting enriched with mixture of MixedSubElement and others") + + elements = tuple(e for e in elements if e is not null_element) + if elements: + return reconstruct(elements) + else: + return null_element + + +@restrict.register(finat.HCurlElement) +def restrict_hcurl(element, domain, take_closure): + restricted = restrict(element.wrappee, domain, take_closure) + if restricted is null_element: + return null_element + else: + if isinstance(restricted, finat.EnrichedElement): + return finat.EnrichedElement(finat.HCurlElement(e) + for e in restricted.elements) + else: + return finat.HCurlElement(restricted) + + +@restrict.register(finat.HDivElement) +def restrict_hdiv(element, domain, take_closure): + restricted = restrict(element.wrappee, domain, take_closure) + if restricted is null_element: + return null_element + else: + if isinstance(restricted, finat.EnrichedElement): + return finat.EnrichedElement(finat.HDivElement(e) + for e in restricted.elements) + else: + return finat.HDivElement(restricted) + + +@restrict.register(finat.mixed.MixedSubElement) +def restrict_mixed(element, domain, take_closure): + raise AssertionError("Was expecting this to be handled inside EnrichedElement restriction") + + +def r_to_codim(restriction, dim): + if restriction == "interior": + return 0 + elif restriction == "facet": + return 1 + elif restriction == "face": + return dim - 2 + elif restriction == "edge": + return dim - 1 + elif restriction == "vertex": + return dim + else: + raise ValueError + + +def codim_to_r(codim, dim): + d = dim - codim + if codim == 0: + return "interior" + elif codim == 1: + return "facet" + elif d == 0: + return "vertex" + elif d == 1: + return "edge" + elif d == 2: + return "face" + else: + raise ValueError + + +@restrict.register(finat.TensorProductElement) +def restrict_tpe(element, domain, take_closure): + # The restriction of a TPE to a codim subentity is the direct sum + # of TPEs where the factors have been restricted in such a way + # that the sum of those restrictions is codim. + # + # For example, to restrict an interval x interval to edges (codim 1) + # we construct + # + # R(I, 0)⊗R(I, 1) ⊕ R(I, 1)⊗R(I, 0) + # + # If take_closure is true, the restriction wants to select dofs on + # entities with dim >= codim >= 1 (for the edge example) + # so we get + # + # R(I, 0)⊗R(I, 1) ⊕ R(I, 1)⊗R(I, 0) ⊕ R(I, 0)⊗R(I, 0) + factors = element.factors + dimension = element.cell.get_spatial_dimension() + # Figure out which codim entity we're selecting + codim = r_to_codim(domain, dimension) + # And the range of codims. + upper = 1 + (dimension + if (take_closure and domain != "interior") + else codim) + # restrictions on each factor taken from n-tuple that sums to the + # target codim (as long as the codim <= dim_factor) + restrictions = tuple(candidate + for candidate in chain(*(mis(len(factors), c) + for c in range(codim, upper))) + if all(d <= factor.cell.get_dimension() + for d, factor in zip(candidate, factors))) + take_closure = False + elements = [] + for decomposition in restrictions: + # Recurse, but don't take closure in recursion (since we + # handled it already). 
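+        # ``decomposition`` assigns a codimension to every factor; codim_to_r
+        # turns each entry back into the restriction-domain string for that factor.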
+ new_factors = tuple( + restrict(factor, codim_to_r(codim, factor.cell.get_dimension()), + take_closure) + for factor, codim in zip(factors, decomposition)) + # If one of the factors was empty then the whole TPE is empty, + # so skip. + if all(f is not null_element for f in new_factors): + elements.append(finat.TensorProductElement(new_factors)) + if elements: + return finat.EnrichedElement(elements) + else: + return null_element + + +@restrict.register(finat.TensorFiniteElement) +def restrict_tfe(element, domain, take_closure): + restricted = restrict(element._base_element, domain, take_closure) + if restricted is null_element: + return null_element + else: + return finat.TensorFiniteElement(restricted, element._shape, element._transpose) + + +@restrict.register(finat.HDivTrace) +def restrict_hdivtrace(element, domain, take_closure): + try: + return FiatElement(FIAT.RestrictedElement(element._element, restriction_domain=domain)) + except ValueError: + return null_element + + +def RestrictedElement(element, restriction_domain, *, indices=None): + """Construct a restricted element. + + :arg element: The element to restrict. + :arg restriction_domain: Which entities to restrict to. + :arg indices: Indices of basis functions to select (not supported) + :returns: A new element. + + .. note:: + + A restriction domain of "interior" means to select the dofs on + the cell, all other domains (e.g. "face", "edge") select dofs + in the closure of the entity. + + .. warning:: + + The returned element *may not* be interpolatory. That is, the + dual basis (if implemented) might not be nodal to the primal + basis. Assembly still works (``basis_evaluation`` is fine), but + interpolation may produce bad results. + + Restrictions of FIAT-implemented CiarletElements are always + nodal. + """ + if indices is not None: + raise NotImplementedError("Only done for topological restrictions") + assert restriction_domain is not None + restricted = restrict(element, restriction_domain, take_closure=True) + if restricted is null_element: + raise ValueError("Restricted element is empty") + return restricted diff --git a/finat/runtime_tabulated.py b/finat/runtime_tabulated.py new file mode 100644 index 000000000..cc629fb43 --- /dev/null +++ b/finat/runtime_tabulated.py @@ -0,0 +1,110 @@ +from FIAT.polynomial_set import mis +from FIAT.reference_element import LINE + +import gem +from gem.utils import cached_property + +from finat.finiteelementbase import FiniteElementBase + + +class RuntimeTabulated(FiniteElementBase): + """Element placeholder for tabulations provided at run time through a + kernel argument. + + Used by Themis. + """ + + def __init__(self, cell, degree, variant=None, shift_axes=0, + restriction=None, continuous=True): + """Construct a runtime tabulated element. + + :arg cell: reference cell + :arg degree: polynomial degree (int) + :arg variant: variant string of the UFL element + :arg shift_axes: first dimension + :arg restriction: None for single-cell integrals, '+' or '-' + for interior facet integrals depending on + which we need the tabulation on + :arg continuous: continuous or discontinuous element? + """ + # Currently only interval elements are accepted. 
+ if cell.get_shape() != LINE: + raise NotImplementedError("Runtime tabulated elements limited to 1D.") + + # Sanity check + assert isinstance(variant, str) + assert isinstance(shift_axes, int) and 0 <= shift_axes + assert isinstance(continuous, bool) + assert restriction in [None, '+', '-'] + + self.cell = cell + self.degree = degree + self.variant = variant + self.shift_axes = shift_axes + self.restriction = restriction + self.continuous = continuous + + @cached_property + def cell(self): + pass # set at initialization + + @cached_property + def degree(self): + pass # set at initialization + + @cached_property + def formdegree(self): + if self.continuous: + return 0 + else: + return self.cell.get_spatial_dimension() + + def entity_dofs(self): + raise NotImplementedError("I cannot tell where my DoFs are... :-/") + + def space_dimension(self): + return self.degree + 1 + + def basis_evaluation(self, order, ps, entity=None, coordinate_mapping=None): + """Return code for evaluating the element at known points on the + reference element. + + :param order: return derivatives up to this order. + :param ps: the point set object. + :param entity: the cell entity on which to tabulate. + """ + # Spatial dimension + dimension = self.cell.get_spatial_dimension() + + # Shape of the tabulation matrix + shape = tuple(index.extent for index in ps.indices) + self.index_shape + self.value_shape + + result = {} + for derivative in range(order + 1): + for alpha in mis(dimension, derivative): + name = str.format("rt_{}_{}_{}_{}_{}_{}", + self.variant, + self.degree, + ''.join(map(str, alpha)), + self.shift_axes, + 'c' if self.continuous else 'd', + {None: "", + '+': "p", + '-': "m"}[self.restriction]) + result[alpha] = gem.partial_indexed(gem.Variable(name, shape), ps.indices) + return result + + def point_evaluation(self, order, point, entity=None): + raise NotImplementedError("Point evaluation not supported for runtime tabulated elements") + + @property + def index_shape(self): + return (self.space_dimension(),) + + @property + def value_shape(self): + return () + + @property + def mapping(self): + return "affine" diff --git a/finat/spectral.py b/finat/spectral.py new file mode 100644 index 000000000..3b4d46323 --- /dev/null +++ b/finat/spectral.py @@ -0,0 +1,128 @@ +import FIAT + +import gem + +from finat.fiat_elements import ScalarFiatElement, Lagrange, DiscontinuousLagrange +from finat.point_set import GaussLobattoLegendrePointSet, GaussLegendrePointSet + + +class GaussLobattoLegendre(Lagrange): + """1D continuous element with nodes at the Gauss-Lobatto points.""" + + def __init__(self, cell, degree): + fiat_element = FIAT.GaussLobattoLegendre(cell, degree) + super(Lagrange, self).__init__(fiat_element) + + def basis_evaluation(self, order, ps, entity=None, coordinate_mapping=None): + '''Return code for evaluating the element at known points on the + reference element. + + :param order: return derivatives up to this order. + :param ps: the point set. + :param entity: the cell entity on which to tabulate. + ''' + + result = super().basis_evaluation(order, ps, entity) + cell_dimension = self.cell.get_dimension() + if entity is None or entity == (cell_dimension, 0): # on cell interior + space_dim = self.space_dimension() + if isinstance(ps, GaussLobattoLegendrePointSet) and len(ps.points) == space_dim: + # Bingo: evaluation points match node locations! 
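+                # The basis is nodal at these points, so the zeroth-derivative
+                # table is exactly phi_r(x_q) = delta_{qr}; emit a Kronecker
+                # delta and leave the derivative tables untouched.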
+ spatial_dim = self.cell.get_spatial_dimension() + q, = ps.indices + r, = self.get_indices() + result[(0,) * spatial_dim] = gem.ComponentTensor(gem.Delta(q, r), (r,)) + return result + + +class GaussLegendre(DiscontinuousLagrange): + """1D discontinuous element with nodes at the Gauss-Legendre points.""" + + def __init__(self, cell, degree): + fiat_element = FIAT.GaussLegendre(cell, degree) + super(DiscontinuousLagrange, self).__init__(fiat_element) + + def basis_evaluation(self, order, ps, entity=None, coordinate_mapping=None): + '''Return code for evaluating the element at known points on the + reference element. + + :param order: return derivatives up to this order. + :param ps: the point set. + :param entity: the cell entity on which to tabulate. + ''' + + result = super().basis_evaluation(order, ps, entity) + cell_dimension = self.cell.get_dimension() + if entity is None or entity == (cell_dimension, 0): # on cell interior + space_dim = self.space_dimension() + if isinstance(ps, GaussLegendrePointSet) and len(ps.points) == space_dim: + # Bingo: evaluation points match node locations! + spatial_dim = self.cell.get_spatial_dimension() + q, = ps.indices + r, = self.get_indices() + result[(0,) * spatial_dim] = gem.ComponentTensor(gem.Delta(q, r), (r,)) + return result + + +class Legendre(ScalarFiatElement): + """DG element with Legendre polynomials.""" + + def __init__(self, cell, degree, variant=None): + fiat_element = FIAT.Legendre(cell, degree, variant=variant) + super().__init__(fiat_element) + + +class IntegratedLegendre(ScalarFiatElement): + """CG element with integrated Legendre polynomials.""" + + def __init__(self, cell, degree, variant=None): + fiat_element = FIAT.IntegratedLegendre(cell, degree, variant=variant) + super().__init__(fiat_element) + + +class FDMLagrange(ScalarFiatElement): + """1D CG element with FDM shape functions and point evaluation BCs.""" + + def __init__(self, cell, degree): + fiat_element = FIAT.FDMLagrange(cell, degree) + super().__init__(fiat_element) + + +class FDMDiscontinuousLagrange(ScalarFiatElement): + """1D DG element with derivatives of FDM shape functions with point evaluation Bcs.""" + + def __init__(self, cell, degree): + fiat_element = FIAT.FDMDiscontinuousLagrange(cell, degree) + super().__init__(fiat_element) + + +class FDMQuadrature(ScalarFiatElement): + """1D CG element with FDM shape functions and orthogonalized vertex modes.""" + + def __init__(self, cell, degree): + fiat_element = FIAT.FDMQuadrature(cell, degree) + super().__init__(fiat_element) + + +class FDMBrokenH1(ScalarFiatElement): + """1D Broken CG element with FDM shape functions.""" + + def __init__(self, cell, degree): + fiat_element = FIAT.FDMBrokenH1(cell, degree) + super().__init__(fiat_element) + + +class FDMBrokenL2(ScalarFiatElement): + """1D DG element with derivatives of FDM shape functions.""" + + def __init__(self, cell, degree): + fiat_element = FIAT.FDMBrokenL2(cell, degree) + super().__init__(fiat_element) + + +class FDMHermite(ScalarFiatElement): + """1D CG element with FDM shape functions, point evaluation BCs and derivative BCs.""" + + def __init__(self, cell, degree): + fiat_element = FIAT.FDMHermite(cell, degree) + super().__init__(fiat_element) diff --git a/finat/sympy2gem.py b/finat/sympy2gem.py new file mode 100644 index 000000000..29add8760 --- /dev/null +++ b/finat/sympy2gem.py @@ -0,0 +1,149 @@ +from functools import singledispatch, reduce + +import numpy +import sympy +try: + import symengine +except ImportError: + class Mock: + def 
__getattribute__(self, name): + return Mock + symengine = Mock() + +import gem + + +@singledispatch +def sympy2gem(node, self): + raise AssertionError("sympy/symengine node expected, got %s" % type(node)) + + +@sympy2gem.register(sympy.Expr) +@sympy2gem.register(symengine.Expr) +def sympy2gem_expr(node, self): + raise NotImplementedError("no handler for sympy/symengine node type %s" % type(node)) + + +@sympy2gem.register(sympy.Add) +@sympy2gem.register(symengine.Add) +def sympy2gem_add(node, self): + return reduce(gem.Sum, map(self, node.args)) + + +@sympy2gem.register(sympy.Mul) +@sympy2gem.register(symengine.Mul) +def sympy2gem_mul(node, self): + return reduce(gem.Product, map(self, node.args)) + + +@sympy2gem.register(sympy.Pow) +@sympy2gem.register(symengine.Pow) +def sympy2gem_pow(node, self): + return gem.Power(*map(self, node.args)) + + +@sympy2gem.register(sympy.logic.boolalg.BooleanTrue) +@sympy2gem.register(sympy.logic.boolalg.BooleanFalse) +@sympy2gem.register(bool) +def sympy2gem_boolean(node, self): + return gem.Literal(bool(node)) + + +@sympy2gem.register(sympy.Integer) +@sympy2gem.register(symengine.Integer) +@sympy2gem.register(int) +def sympy2gem_integer(node, self): + return gem.Literal(int(node)) + + +@sympy2gem.register(sympy.Float) +@sympy2gem.register(symengine.Float) +@sympy2gem.register(float) +def sympy2gem_float(node, self): + return gem.Literal(float(node)) + + +@sympy2gem.register(sympy.Symbol) +@sympy2gem.register(symengine.Symbol) +def sympy2gem_symbol(node, self): + return self.bindings[node] + + +@sympy2gem.register(sympy.Rational) +@sympy2gem.register(symengine.Rational) +def sympy2gem_rational(node, self): + return gem.Division(*(map(self, node.as_numer_denom()))) + + +@sympy2gem.register(sympy.Abs) +@sympy2gem.register(symengine.Abs) +def sympy2gem_abs(node, self): + return gem.MathFunction("abs", *map(self, node.args)) + + +@sympy2gem.register(sympy.Not) +@sympy2gem.register(symengine.Not) +def sympy2gem_not(node, self): + return gem.LogicalNot(*map(self, node.args)) + + +@sympy2gem.register(sympy.Or) +@sympy2gem.register(symengine.Or) +def sympy2gem_or(node, self): + return reduce(gem.LogicalOr, map(self, node.args)) + + +@sympy2gem.register(sympy.And) +@sympy2gem.register(symengine.And) +def sympy2gem_and(node, self): + return reduce(gem.LogicalAnd, map(self, node.args)) + + +@sympy2gem.register(sympy.Eq) +@sympy2gem.register(symengine.Eq) +def sympy2gem_eq(node, self): + return gem.Comparison("==", *map(self, node.args)) + + +@sympy2gem.register(sympy.Gt) +def sympy2gem_gt(node, self): + return gem.Comparison(">", *map(self, node.args)) + + +@sympy2gem.register(sympy.Ge) +def sympy2gem_ge(node, self): + return gem.Comparison(">=", *map(self, node.args)) + + +@sympy2gem.register(sympy.Lt) +@sympy2gem.register(symengine.Lt) +def sympy2gem_lt(node, self): + return gem.Comparison("<", *map(self, node.args)) + + +@sympy2gem.register(sympy.Le) +@sympy2gem.register(symengine.Le) +def sympy2gem_le(node, self): + return gem.Comparison("<=", *map(self, node.args)) + + +@sympy2gem.register(sympy.Piecewise) +@sympy2gem.register(symengine.Piecewise) +def sympy2gem_conditional(node, self): + expr = None + pieces = [] + for v, c in node.args: + if isinstance(c, (bool, numpy.bool, sympy.logic.boolalg.BooleanTrue)) and c: + expr = self(v) + break + pieces.append((v, c)) + if expr is None: + expr = gem.Literal(float("nan")) + for v, c in reversed(pieces): + expr = gem.Conditional(self(c), self(v), expr) + return expr + + +@sympy2gem.register(sympy.ITE) +def 
sympy2gem_ifthenelse(node, self): + return gem.Conditional(*map(self, node.args)) diff --git a/finat/tensor_product.py b/finat/tensor_product.py new file mode 100644 index 000000000..f0fd58477 --- /dev/null +++ b/finat/tensor_product.py @@ -0,0 +1,340 @@ +from functools import reduce +from itertools import chain, product +from operator import methodcaller + +import numpy + +import FIAT +from FIAT.polynomial_set import mis +from FIAT.reference_element import TensorProductCell +from FIAT.orientation_utils import make_entity_permutations_tensorproduct + +import gem +from gem.utils import cached_property + +from finat.finiteelementbase import FiniteElementBase +from finat.point_set import PointSingleton, PointSet, TensorPointSet + + +class TensorProductElement(FiniteElementBase): + + def __init__(self, factors): + super(TensorProductElement, self).__init__() + self.factors = tuple(factors) + + shapes = [fe.value_shape for fe in self.factors if fe.value_shape != ()] + if len(shapes) == 0: + self._value_shape = () + elif len(shapes) == 1: + self._value_shape = shapes[0] + else: + raise NotImplementedError("Only one nonscalar factor permitted!") + + @cached_property + def cell(self): + return TensorProductCell(*[fe.cell for fe in self.factors]) + + @cached_property + def complex(self): + return TensorProductCell(*[fe.complex for fe in self.factors]) + + @property + def degree(self): + return tuple(fe.degree for fe in self.factors) + + @cached_property + def formdegree(self): + if any(fe.formdegree is None for fe in self.factors): + return None + else: + return sum(fe.formdegree for fe in self.factors) + + @cached_property + def _entity_dofs(self): + return productise(self.factors, methodcaller("entity_dofs")) + + @cached_property + def _entity_support_dofs(self): + return productise(self.factors, methodcaller("entity_support_dofs")) + + def entity_dofs(self): + return self._entity_dofs + + @cached_property + def entity_permutations(self): + return compose_permutations(self.factors) + + def space_dimension(self): + return numpy.prod([fe.space_dimension() for fe in self.factors]) + + @property + def index_shape(self): + return tuple(chain(*[fe.index_shape for fe in self.factors])) + + @property + def value_shape(self): + return self._value_shape + + @cached_property + def fiat_equivalent(self): + # FIAT TensorProductElement support only 2 factors + A, B = self.factors + return FIAT.TensorProductElement(A.fiat_equivalent, B.fiat_equivalent) + + def _factor_entity(self, entity): + # Default entity + if entity is None: + entity = (self.cell.get_dimension(), 0) + entity_dim, entity_id = entity + + # Factor entity + assert isinstance(entity_dim, tuple) + assert len(entity_dim) == len(self.factors) + + shape = tuple(len(c.get_topology()[d]) + for c, d in zip(self.cell.cells, entity_dim)) + entities = list(zip(entity_dim, numpy.unravel_index(entity_id, shape))) + return entities + + def _merge_evaluations(self, factor_results): + # Spatial dimension + dimension = self.cell.get_spatial_dimension() + + # Derivative order + order = max(map(sum, chain(*factor_results))) + + # A list of slices that are used to select dimensions + # corresponding to each subelement. + dim_slices = TensorProductCell._split_slices([c.get_spatial_dimension() + for c in self.cell.cells]) + + # A list of multiindices, one multiindex per subelement, each + # multiindex describing the shape of basis functions of the + # subelement. 
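+        # For example (editor's illustration): for P1(interval) x P1(interval),
+        # alphas is [(i,), (j,)] with both index extents equal to 2.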
+ alphas = [fe.get_indices() for fe in self.factors] + + # A list of multiindices, one multiindex per subelement, each + # multiindex describing the value shape of the subelement. + zetas = [fe.get_value_indices() for fe in self.factors] + + result = {} + for derivative in range(order + 1): + for Delta in mis(dimension, derivative): + # Split the multiindex for the subelements + deltas = [Delta[s] for s in dim_slices] + # GEM scalars (can have free indices) for collecting + # the contributions from the subelements. + scalars = [] + for fr, delta, alpha, zeta in zip(factor_results, deltas, alphas, zetas): + # Turn basis shape to free indices, select the + # right derivative entry, and collect the result. + scalars.append(gem.Indexed(fr[delta], alpha + zeta)) + # Multiply the values from the subelements and wrap up + # non-point indices into shape. + result[Delta] = gem.ComponentTensor( + reduce(gem.Product, scalars), + tuple(chain(*(alphas + zetas))) + ) + return result + + def basis_evaluation(self, order, ps, entity=None, coordinate_mapping=None): + entities = self._factor_entity(entity) + entity_dim, _ = zip(*entities) + + ps_factors = factor_point_set(self.cell, entity_dim, ps) + + factor_results = [fe.basis_evaluation(order, ps_, e) + for fe, ps_, e in zip(self.factors, ps_factors, entities)] + + return self._merge_evaluations(factor_results) + + def point_evaluation(self, order, point, entity=None): + entities = self._factor_entity(entity) + entity_dim, _ = zip(*entities) + + # Split point expression + assert len(self.cell.cells) == len(entity_dim) + point_dims = [cell.construct_subelement(dim).get_spatial_dimension() + for cell, dim in zip(self.cell.cells, entity_dim)] + assert isinstance(point, gem.Node) and point.shape == (sum(point_dims),) + slices = TensorProductCell._split_slices(point_dims) + point_factors = [] + for s in slices: + point_factors.append(gem.ListTensor( + [gem.Indexed(point, (i,)) + for i in range(s.start, s.stop)] + )) + + # Subelement results + factor_results = [fe.point_evaluation(order, p_, e) + for fe, p_, e in zip(self.factors, point_factors, entities)] + + return self._merge_evaluations(factor_results) + + @property + def dual_basis(self): + # Outer product the dual bases of the factors + qs, pss = zip(*(factor.dual_basis for factor in self.factors)) + ps = TensorPointSet(pss) + # Naming as _merge_evaluations above + alphas = [factor.get_indices() for factor in self.factors] + zetas = [factor.get_value_indices() for factor in self.factors] + # Index the factors by so that we can reshape into index-shape + # followed by value-shape + qis = [q[alpha + zeta] for q, alpha, zeta in zip(qs, alphas, zetas)] + Q = gem.ComponentTensor( + reduce(gem.Product, qis), + tuple(chain(*(alphas + zetas))) + ) + return Q, ps + + @cached_property + def mapping(self): + mappings = [fe.mapping for fe in self.factors if fe.mapping != "affine"] + if len(mappings) == 0: + return "affine" + elif len(mappings) == 1: + return mappings[0] + else: + return None + + +def productise(factors, method): + '''Tensor product the dict mapping topological entities to dofs across factors. + + :arg factors: element factors. 
+ :arg method: instance method to call on each factor to get dofs.''' + shape = tuple(fe.space_dimension() for fe in factors) + dofs = {} + for dim in product(*[fe.cell.get_topology().keys() + for fe in factors]): + dim_dofs = [] + topds = [method(fe)[d] + for fe, d in zip(factors, dim)] + for tuple_ei in product(*[sorted(topd) for topd in topds]): + tuple_vs = list(product(*[topd[ei] + for topd, ei in zip(topds, tuple_ei)])) + if tuple_vs: + vs = list(numpy.ravel_multi_index(numpy.transpose(tuple_vs), shape)) + dim_dofs.append((tuple_ei, vs)) + else: + dim_dofs.append((tuple_ei, [])) + # flatten entity numbers + dofs[dim] = dict(enumerate(v for k, v in sorted(dim_dofs))) + return dofs + + +def compose_permutations(factors): + r"""For the :class:`TensorProductElement` object composed of factors, + construct, for each dimension tuple, for each entity, and for each possible + entity orientation combination, the DoF permutation list. + + :arg factors: element factors. + :returns: entity_permutation dict of the :class:`TensorProductElement` object + composed of factors. + + For tensor-product elements, one needs to consider two kinds of orientations: + extrinsic orientations and intrinsic ("material") orientations. + + Example + ------- + + UFCQuadrilateral := UFCInterval x UFCInterval + + eo (extrinsic orientation): swap axes (X -> y, Y-> x) + io (intrinsic orientation): reflect component intervals + o (total orientation) : (2 ** dim) * eo + io + + eo\\io 0 1 2 3 + + 1---3 0---2 3---1 2---0 + 0 | | | | | | | | + 0---2 1---3 2---0 3---1 + + 2---3 3---2 0---1 1---0 + 1 | | | | | | | | + 0---1 1---0 2---3 3---2 + + .. code-block:: python3 + + import FIAT + import finat + + cell = FIAT.ufc_cell("interval") + elem = finat.DiscontinuousLagrange(cell, 1) + elem = finat.TensorProductElement([elem, elem]) + print(elem.entity_permutations) + + prints: + + {(0, 0): {0: {(0, 0, 0): []}, + 1: {(0, 0, 0): []}, + 2: {(0, 0, 0): []}, + 3: {(0, 0, 0): []}}, + (0, 1): {0: {(0, 0, 0): [], + (0, 0, 1): []}, + 1: {(0, 0, 0): [], + (0, 0, 1): []}}, + (1, 0): {0: {(0, 0, 0): [], + (0, 1, 0): []}, + 1: {(0, 0, 0): [], + (0, 1, 0): []}}, + (1, 1): {0: {(0, 0, 0): [0, 1, 2, 3], + (0, 0, 1): [1, 0, 3, 2], + (0, 1, 0): [2, 3, 0, 1], + (0, 1, 1): [3, 2, 1, 0], + (1, 0, 0): [0, 2, 1, 3], + (1, 0, 1): [2, 0, 3, 1], + (1, 1, 0): [1, 3, 0, 2], + (1, 1, 1): [3, 1, 2, 0]}}} + + """ + permutations = {} + cells = [fe.cell for fe in factors] + for dim in product(*[cell.get_topology().keys() for cell in cells]): + dim_permutations = [] + e_o_p_maps = [fe.entity_permutations[d] for fe, d in zip(factors, dim)] + for e_tuple in product(*[sorted(e_o_p_map) for e_o_p_map in e_o_p_maps]): + o_p_maps = [e_o_p_map[e] for e_o_p_map, e in zip(e_o_p_maps, e_tuple)] + o_tuple_perm_map = make_entity_permutations_tensorproduct(cells, dim, o_p_maps) + dim_permutations.append((e_tuple, o_tuple_perm_map)) + permutations[dim] = dict(enumerate(v for k, v in sorted(dim_permutations))) + return permutations + + +def factor_point_set(product_cell, product_dim, point_set): + """Factors a point set for the product element into a point sets for + each subelement. 
+ + :arg product_cell: a TensorProductCell + :arg product_dim: entity dimension for the product cell + :arg point_set: point set for the product element + """ + assert len(product_cell.cells) == len(product_dim) + point_dims = [cell.construct_subelement(dim).get_spatial_dimension() + for cell, dim in zip(product_cell.cells, product_dim)] + + if isinstance(point_set, TensorPointSet) and \ + len(product_cell.cells) == len(point_set.factors): + # Just give the factors asserting matching dimensions. + assert len(point_set.factors) == len(point_dims) + assert all(ps.dimension == dim + for ps, dim in zip(point_set.factors, point_dims)) + return point_set.factors + + # Split the point coordinates along the point dimensions + # required by the subelements. + assert point_set.dimension == sum(point_dims) + slices = TensorProductCell._split_slices(point_dims) + if isinstance(point_set, PointSingleton): + return [PointSingleton(point_set.point[s]) for s in slices] + elif isinstance(point_set, (PointSet, TensorPointSet)): + # Use the same point index for the new point sets. + result = [] + for s in slices: + ps = PointSet(point_set.points[:, s]) + ps.indices = point_set.indices + result.append(ps) + return result + + raise NotImplementedError("How to tabulate TensorProductElement on %s?" % (type(point_set).__name__,)) diff --git a/finat/tensorfiniteelement.py b/finat/tensorfiniteelement.py new file mode 100644 index 000000000..c0a8aa91e --- /dev/null +++ b/finat/tensorfiniteelement.py @@ -0,0 +1,209 @@ +from functools import reduce +from itertools import chain + +import numpy + +import gem +from gem.optimise import delta_elimination, sum_factorise, traverse_product +from gem.utils import cached_property + +from finat.finiteelementbase import FiniteElementBase + + +class TensorFiniteElement(FiniteElementBase): + + def __init__(self, element, shape, transpose=False): + # TODO: Update docstring for arbitrary rank! + r"""A Finite element whose basis functions have the form: + + .. math:: + + \boldsymbol\phi_{i \alpha \beta} = \mathbf{e}_{\alpha} \mathbf{e}_{\beta}^{\mathrm{T}}\phi_i + + Where :math:`\{\mathbf{e}_\alpha,\, \alpha=0\ldots\mathrm{shape[0]}\}` and + :math:`\{\mathbf{e}_\beta,\, \beta=0\ldots\mathrm{shape[1]}\}` are + the bases for :math:`\mathbb{R}^{\mathrm{shape[0]}}` and + :math:`\mathbb{R}^{\mathrm{shape[1]}}` respectively; and + :math:`\{\phi_i\}` is the basis for the corresponding scalar + finite element space. + + :param element: The scalar finite element. + :param shape: The geometric shape of the tensor element. + :param transpose: Changes the DoF ordering from the + Firedrake-style XYZ XYZ XYZ XYZ to the + FEniCS-style XXXX YYYY ZZZZ. That is, + tensor shape indices come before the scalar + basis function indices when transpose=True. + + :math:`\boldsymbol\phi_{i\alpha\beta}` is, of course, tensor-valued. If + we subscript the vector-value with :math:`\gamma\epsilon` then we can write: + + .. 
math:: + \boldsymbol\phi_{\gamma\epsilon(i\alpha\beta)} = \delta_{\gamma\alpha}\delta_{\epsilon\beta}\phi_i + + This form enables the simplification of the loop nests which + will eventually be created, so it is the form we employ here.""" + super(TensorFiniteElement, self).__init__() + self._base_element = element + self._shape = shape + self._transpose = transpose + + @property + def base_element(self): + """The base element of this tensor element.""" + return self._base_element + + @property + def cell(self): + return self._base_element.cell + + @property + def complex(self): + return self._base_element.complex + + @property + def degree(self): + return self._base_element.degree + + @property + def formdegree(self): + return self._base_element.formdegree + + @cached_property + def _entity_dofs(self): + dofs = {} + base_dofs = self._base_element.entity_dofs() + ndof = int(numpy.prod(self._shape, dtype=int)) + + def expand(dofs): + dofs = tuple(dofs) + if self._transpose: + space_dim = self._base_element.space_dimension() + # Components stride by space dimension of base element + iterable = ((v + i*space_dim for v in dofs) + for i in range(ndof)) + else: + # Components packed together + iterable = (range(v*ndof, (v+1)*ndof) for v in dofs) + yield from chain.from_iterable(iterable) + + for dim in self.cell.get_topology().keys(): + dofs[dim] = dict((k, list(expand(d))) + for k, d in base_dofs[dim].items()) + return dofs + + def entity_dofs(self): + return self._entity_dofs + + def space_dimension(self): + return int(numpy.prod(self.index_shape)) + + @property + def index_shape(self): + if self._transpose: + return self._shape + self._base_element.index_shape + else: + return self._base_element.index_shape + self._shape + + @property + def value_shape(self): + return self._shape + self._base_element.value_shape + + def basis_evaluation(self, order, ps, entity=None, coordinate_mapping=None): + r"""Produce the recipe for basis function evaluation at a set of points :math:`q`: + + .. 
math:: + \boldsymbol\phi_{(\gamma \epsilon) (i \alpha \beta) q} = \delta_{\alpha \gamma} \delta_{\beta \epsilon}\phi_{i q} + + \nabla\boldsymbol\phi_{(\epsilon \gamma \zeta) (i \alpha \beta) q} = \delta_{\alpha \epsilon} \delta_{\beta \gamma}\nabla\phi_{\zeta i q} + """ + scalar_evaluation = self._base_element.basis_evaluation + return self._tensorise(scalar_evaluation(order, ps, entity, coordinate_mapping=coordinate_mapping)) + + def point_evaluation(self, order, point, entity=None): + scalar_evaluation = self._base_element.point_evaluation + return self._tensorise(scalar_evaluation(order, point, entity)) + + def _tensorise(self, scalar_evaluation): + # Old basis function and value indices + scalar_i = self._base_element.get_indices() + scalar_vi = self._base_element.get_value_indices() + + # New basis function and value indices + tensor_i = tuple(gem.Index(extent=d) for d in self._shape) + tensor_vi = tuple(gem.Index(extent=d) for d in self._shape) + + # Couple new basis function and value indices + deltas = reduce(gem.Product, (gem.Delta(j, k) + for j, k in zip(tensor_i, tensor_vi))) + + if self._transpose: + index_ordering = tensor_i + scalar_i + tensor_vi + scalar_vi + else: + index_ordering = scalar_i + tensor_i + tensor_vi + scalar_vi + + result = {} + for alpha, expr in scalar_evaluation.items(): + result[alpha] = gem.ComponentTensor( + gem.Product(deltas, gem.Indexed(expr, scalar_i + scalar_vi)), + index_ordering + ) + return result + + @property + def dual_basis(self): + base = self.base_element + Q, points = base.dual_basis + + # Suppose the tensor element has shape (2, 4) + # These identity matrices may have difference sizes depending the shapes + # tQ = Q ⊗ 𝟙₂ ⊗ 𝟙₄ + scalar_i = self.base_element.get_indices() + scalar_vi = self.base_element.get_value_indices() + tensor_i = tuple(gem.Index(extent=d) for d in self._shape) + tensor_vi = tuple(gem.Index(extent=d) for d in self._shape) + # Couple new basis function and value indices + deltas = reduce(gem.Product, (gem.Delta(j, k) + for j, k in zip(tensor_i, tensor_vi))) + if self._transpose: + index_ordering = tensor_i + scalar_i + tensor_vi + scalar_vi + else: + index_ordering = scalar_i + tensor_i + tensor_vi + scalar_vi + + Qi = Q[scalar_i + scalar_vi] + tQ = gem.ComponentTensor(Qi*deltas, index_ordering) + return tQ, points + + def dual_evaluation(self, fn): + tQ, x = self.dual_basis + expr = fn(x) + # Apply targeted sum factorisation and delta elimination to + # the expression + sum_indices, factors = delta_elimination(*traverse_product(expr)) + expr = sum_factorise(sum_indices, factors) + # NOTE: any shape indices in the expression are because the + # expression is tensor valued. + assert expr.shape == self.value_shape + + scalar_i = self.base_element.get_indices() + scalar_vi = self.base_element.get_value_indices() + tensor_i = tuple(gem.Index(extent=d) for d in self._shape) + tensor_vi = tuple(gem.Index(extent=d) for d in self._shape) + + if self._transpose: + index_ordering = tensor_i + scalar_i + tensor_vi + scalar_vi + else: + index_ordering = scalar_i + tensor_i + tensor_vi + scalar_vi + + tQi = tQ[index_ordering] + expri = expr[tensor_i + scalar_vi] + evaluation = gem.IndexSum(tQi * expri, x.indices + scalar_vi + tensor_i) + # This doesn't work perfectly, the resulting code doesn't have + # a minimal memory footprint, although the operation count + # does appear to be minimal. 
+ evaluation = gem.optimise.contraction(evaluation) + return evaluation, scalar_i + tensor_vi + + @property + def mapping(self): + return self._base_element.mapping diff --git a/finat/trace.py b/finat/trace.py new file mode 100644 index 000000000..d5e784281 --- /dev/null +++ b/finat/trace.py @@ -0,0 +1,8 @@ +import FIAT + +from finat.fiat_elements import ScalarFiatElement + + +class HDivTrace(ScalarFiatElement): + def __init__(self, cell, degree): + super().__init__(FIAT.HDivTrace(cell, degree)) diff --git a/finat/ufl/__init__.py b/finat/ufl/__init__.py new file mode 100644 index 000000000..21a7d13d1 --- /dev/null +++ b/finat/ufl/__init__.py @@ -0,0 +1,21 @@ +"""Legacy UFL features.""" +# Copyright (C) 2008-2016 Martin Sandve Alnæs +# +# This file was originally part of UFL (https://www.fenicsproject.org) +# +# SPDX-License-Identifier: LGPL-3.0-or-later +# +# Modified by Kristian B. Oelgaard +# Modified by Marie E. Rognes 2010, 2012 +# Modified by Andrew T. T. McRae 2014 +# Modified by Lawrence Mitchell 2014 +# Modified by Matthew Scroggs, 2023 + +from finat.ufl.brokenelement import BrokenElement # noqa: F401 +from finat.ufl.enrichedelement import EnrichedElement, NodalEnrichedElement # noqa: F401 +from finat.ufl.finiteelement import FiniteElement # noqa: F401 +from finat.ufl.finiteelementbase import FiniteElementBase # noqa: F401 +from finat.ufl.hdivcurl import HCurlElement, HDivElement, WithMapping, HDiv, HCurl # noqa: F401 +from finat.ufl.mixedelement import MixedElement, TensorElement, VectorElement # noqa: F401 +from finat.ufl.restrictedelement import RestrictedElement # noqa: F401 +from finat.ufl.tensorproductelement import TensorProductElement # noqa: F401 diff --git a/finat/ufl/brokenelement.py b/finat/ufl/brokenelement.py new file mode 100644 index 000000000..3e8202883 --- /dev/null +++ b/finat/ufl/brokenelement.py @@ -0,0 +1,63 @@ +"""Element.""" +# -*- coding: utf-8 -*- +# Copyright (C) 2014 Andrew T. T. 
McRae +# +# This file was originally part of UFL (https://www.fenicsproject.org) +# +# SPDX-License-Identifier: LGPL-3.0-or-later +# +# Modified by Massimiliano Leoni, 2016 +# Modified by Matthew Scroggs, 2023 + +from finat.ufl.finiteelementbase import FiniteElementBase +from ufl.sobolevspace import L2 + + +class BrokenElement(FiniteElementBase): + """The discontinuous version of an existing Finite Element space.""" + def __init__(self, element): + """Init.""" + self._element = element + + family = "BrokenElement" + cell = element.cell + degree = element.degree() + quad_scheme = element.quadrature_scheme() + reference_value_shape = element.reference_value_shape + FiniteElementBase.__init__(self, family, cell, degree, + quad_scheme, reference_value_shape) + + def __repr__(self): + """Doc.""" + return f"BrokenElement({repr(self._element)})" + + def mapping(self): + """Doc.""" + return self._element.mapping() + + @property + def sobolev_space(self): + """Return the underlying Sobolev space.""" + return L2 + + def reconstruct(self, **kwargs): + """Doc.""" + return BrokenElement(self._element.reconstruct(**kwargs)) + + def __str__(self): + """Doc.""" + return f"BrokenElement({repr(self._element)})" + + def shortstr(self): + """Format as string for pretty printing.""" + return f"BrokenElement({repr(self._element)})" + + @property + def embedded_subdegree(self): + """Return embedded subdegree.""" + return self._element.embedded_subdegree + + @property + def embedded_superdegree(self): + """Return embedded superdegree.""" + return self._element.embedded_superdegree diff --git a/finat/ufl/elementlist.py b/finat/ufl/elementlist.py new file mode 100644 index 000000000..172eb78f6 --- /dev/null +++ b/finat/ufl/elementlist.py @@ -0,0 +1,468 @@ +"""Element. + +This module provides an extensive list of predefined finite element +families. Users or, more likely, form compilers, may register new +elements by calling the function register_element. +""" +# Copyright (C) 2008-2016 Martin Sandve Alnæs and Anders Logg +# +# This file was originally part of UFL (https://www.fenicsproject.org) +# +# SPDX-License-Identifier: LGPL-3.0-or-later +# +# Modified by Marie E. Rognes , 2010 +# Modified by Lizao Li , 2015, 2016 +# Modified by Massimiliano Leoni, 2016 +# Modified by Robert Kloefkorn, 2022 +# Modified by Matthew Scroggs, 2023 +# Modified by Pablo Brubeck, 2024 + +import warnings + +from numpy import asarray + +from ufl.cell import Cell, TensorProductCell +from ufl.sobolevspace import H1, H2, L2, HCurl, HDiv, HDivDiv, HCurlDiv, HEin, HInf +from ufl.utils.formatting import istr + +# List of valid elements +ufl_elements = {} + +# Aliases: aliases[name] (...) -> (standard_name, ...) 
+aliases = {} + + +# Function for registering new elements +def register_element(family, short_name, value_rank, sobolev_space, mapping, + degree_range, cellnames): + """Register new finite element family.""" + if family in ufl_elements: + raise ValueError(f"Finite element '{family}%s' has already been registered.") + ufl_elements[family] = (family, short_name, value_rank, sobolev_space, + mapping, degree_range, cellnames) + if short_name is not None: + ufl_elements[short_name] = (family, short_name, value_rank, sobolev_space, + mapping, degree_range, cellnames) + + +def register_alias(alias, to): + """Doc.""" + aliases[alias] = to + + +def show_elements(): + """Shows all registered elements.""" + print("Showing all registered elements:") + print("================================") + shown = set() + for k in sorted(ufl_elements.keys()): + data = ufl_elements[k] + if data in shown: + continue + shown.add(data) + (family, short_name, value_rank, sobolev_space, mapping, degree_range, cellnames) = data + print(f"Finite element family: '{family}', '{short_name}'") + print(f"Sobolev space: {sobolev_space}%s") + print(f"Mapping: {mapping}") + print(f"Degree range: {degree_range}") + print(f"Value rank: {value_rank}") + print(f"Defined on cellnames: {cellnames}") + print() + + +# FIXME: Consider cleanup of element names. Use notation from periodic +# table as the main, keep old names as compatibility aliases. + +# NOTE: Any element with polynomial degree 0 will be considered L2, +# independent of the space passed to register_element. + +# Cell groups +simplices = ("interval", "triangle", "tetrahedron", "pentatope") +cubes = ("interval", "quadrilateral", "hexahedron", "tesseract") +any_cell = (None, "vertex", *simplices, *cubes[1:], "prism", "pyramid") + +# Elements in the periodic table # TODO: Register these as aliases of +# periodic table element description instead of the other way around +register_element("Lagrange", "CG", 0, H1, "identity", (1, None), any_cell) # "P" +register_element("Brezzi-Douglas-Marini", "BDM", 1, HDiv, "contravariant Piola", (1, None), simplices[1:]) # "BDMF" (2d), "N2F" (3d) +register_element("Discontinuous Lagrange", "DG", 0, L2, "identity", (0, None), any_cell) # "DP" +register_element("Discontinuous Taylor", "TDG", 0, L2, "identity", (0, None), simplices) +register_element("Nedelec 1st kind H(curl)", "N1curl", 1, HCurl, "covariant Piola", (1, None), simplices[1:]) # "RTE" (2d), "N1E" (3d) +register_element("Nedelec 2nd kind H(curl)", "N2curl", 1, HCurl, "covariant Piola", (1, None), simplices[1:]) # "BDME" (2d), "N2E" (3d) +register_element("Raviart-Thomas", "RT", 1, HDiv, "contravariant Piola", (1, None), simplices[1:]) # "RTF" (2d), "N1F" (3d) + +# Elements not in the periodic table +# TODO: Implement generic Tear operator for elements instead of this: +register_element("Brezzi-Douglas-Fortin-Marini", "BDFM", 1, HDiv, "contravariant Piola", (1, None), simplices[1:]) +register_element("Crouzeix-Raviart", "CR", 0, L2, "identity", (1, 1), simplices[1:]) +register_element("Discontinuous Raviart-Thomas", "DRT", 1, L2, "contravariant Piola", (1, None), simplices[1:]) +register_element("Kong-Mulder-Veldhuizen", "KMV", 0, H1, "identity", (1, None), simplices[1:]) + +# Tensor elements +register_element("Regge", "Regge", 2, HEin, "double covariant Piola", (0, None), simplices) +register_element("Hellan-Herrmann-Johnson", "HHJ", 2, HDivDiv, "double contravariant Piola", (0, None), ("triangle", "tetrahedron")) +register_element("Gopalakrishnan-Lederer-Schoberl 1st kind", 
"GLS", 2, HCurlDiv, "covariant contravariant Piola", (1, None), simplices[1:]) +register_element("Gopalakrishnan-Lederer-Schoberl 2nd kind", "GLS2", 2, HCurlDiv, "covariant contravariant Piola", (0, None), simplices[1:]) + +register_element("Nonconforming Arnold-Winther", "AWnc", 2, HDiv, "double contravariant Piola", (2, 2), ("triangle",)) +register_element("Conforming Arnold-Winther", "AWc", 2, HDiv, "double contravariant Piola", (3, None), ("triangle",)) +register_element("Hu-Zhang", "HZ", 2, HDiv, "double contravariant Piola", (3, None), ("triangle")) + +# Zany elements +register_element("Bernardi-Raugel", "BR", 1, H1, "contravariant Piola", (1, None), simplices[1:]) +register_element("Bernardi-Raugel Bubble", "BRB", 1, H1, "contravariant Piola", (None, None), simplices[1:]) +register_element("Mardal-Tai-Winther", "MTW", 1, H1, "contravariant Piola", (3, 3), ("triangle",)) +register_element("Hermite", "HER", 0, H1, "custom", (3, 3), simplices) +register_element("Argyris", "ARG", 0, H2, "custom", (5, None), ("triangle",)) +register_element("Bell", "BELL", 0, H2, "custom", (5, 5), ("triangle",)) +register_element("Morley", "MOR", 0, H2, "custom", (2, 2), ("triangle",)) + +# Macro elements +register_element("QuadraticPowellSabin6", "PS6", 0, H2, "custom", (2, 2), ("triangle",)) +register_element("QuadraticPowellSabin12", "PS12", 0, H2, "custom", (2, 2), ("triangle",)) +register_element("Hsieh-Clough-Tocher", "HCT", 0, H2, "custom", (3, None), ("triangle",)) +register_element("Reduced-Hsieh-Clough-Tocher", "HCT-red", 0, H2, "custom", (3, 3), ("triangle",)) +register_element("Johnson-Mercier", "JM", 2, HDiv, "double contravariant Piola", (1, 1), simplices[1:]) + +register_element("Arnold-Qin", "AQ", 1, H1, "identity", (2, 2), ("triangle",)) +register_element("Reduced-Arnold-Qin", "AQ-red", 1, H1, "contravariant Piola", (2, 2), ("triangle",)) +register_element("Christiansen-Hu", "CH", 1, H1, "contravariant Piola", (1, 1), simplices[1:]) +register_element("Alfeld-Sorokina", "AS", 1, H1, "contravariant Piola", (2, 2), simplices[1:]) + +register_element("Guzman-Neilan 1st kind H1", "GN", 1, H1, "contravariant Piola", (1, None), simplices[1:]) +register_element("Guzman-Neilan 2nd kind H1", "GN2", 1, H1, "contravariant Piola", (1, None), simplices[1:]) +register_element("Guzman-Neilan H1(div)", "GNH1div", 1, H1, "contravariant Piola", (2, None), simplices[1:]) +register_element("Guzman-Neilan Bubble", "GNB", 1, H1, "contravariant Piola", (None, None), simplices[1:]) + +# Special elements +register_element("Boundary Quadrature", "BQ", 0, L2, "identity", (0, None), any_cell) +register_element("Bubble", "B", 0, H1, "identity", (2, None), simplices) +register_element("FacetBubble", "FB", 0, H1, "identity", (2, None), simplices) +register_element("Quadrature", "Quadrature", 0, L2, "identity", (0, None), any_cell) +register_element("Real", "R", 0, HInf, "identity", (0, 0), any_cell + ("TensorProductCell",)) +register_element("Undefined", "U", 0, L2, "identity", (0, None), any_cell) +register_element("Radau", "Rad", 0, L2, "identity", (0, None), ("interval",)) +register_element("HDiv Trace", "HDivT", 0, L2, "identity", (0, None), any_cell) + +# Spectral elements. 
+register_element("Gauss-Legendre", "GL", 0, L2, "identity", (0, None), ("interval",)) +register_element("Gauss-Lobatto-Legendre", "GLL", 0, H1, "identity", (1, None), ("interval",)) +register_alias("Lobatto", + lambda family, dim, order, degree: ("Gauss-Lobatto-Legendre", order)) +register_alias("Lob", + lambda family, dim, order, degree: ("Gauss-Lobatto-Legendre", order)) + +register_element("Bernstein", None, 0, H1, "identity", (1, None), simplices) + + +# Let Nedelec H(div) elements be aliases to BDMs/RTs +register_alias("Nedelec 1st kind H(div)", + lambda family, dim, order, degree: ("Raviart-Thomas", order)) +register_alias("N1div", + lambda family, dim, order, degree: ("Raviart-Thomas", order)) + +register_alias("Nedelec 2nd kind H(div)", + lambda family, dim, order, degree: ("Brezzi-Douglas-Marini", + order)) +register_alias("N2div", + lambda family, dim, order, degree: ("Brezzi-Douglas-Marini", + order)) + +# Let Discontinuous Lagrange Trace element be alias to HDiv Trace +register_alias("Discontinuous Lagrange Trace", + lambda family, dim, order, degree: ("HDiv Trace", order)) +register_alias("DGT", + lambda family, dim, order, degree: ("HDiv Trace", order)) + +# New elements introduced for the periodic table 2014 +register_element("Q", None, 0, H1, "identity", (1, None), cubes) +register_element("DQ", None, 0, L2, "identity", (0, None), cubes) +register_element("RTCE", None, 1, HCurl, "covariant Piola", (1, None), ("quadrilateral",)) +register_element("RTCF", None, 1, HDiv, "contravariant Piola", (1, None), ("quadrilateral",)) +register_element("NCE", None, 1, HCurl, "covariant Piola", (1, None), ("hexahedron",)) +register_element("NCF", None, 1, HDiv, "contravariant Piola", (1, None), ("hexahedron",)) + +register_element("S", None, 0, H1, "identity", (1, None), cubes) +register_element("DPC", None, 0, L2, "identity", (0, None), cubes) +register_element("BDMCE", None, 1, HCurl, "covariant Piola", (1, None), ("quadrilateral",)) +register_element("BDMCF", None, 1, HDiv, "contravariant Piola", (1, None), ("quadrilateral",)) +register_element("SminusE", "SminusE", 1, HCurl, "covariant Piola", (1, None), cubes[1:3]) +register_element("SminusF", "SminusF", 1, HDiv, "contravariant Piola", (1, None), cubes[1:2]) +register_element("SminusDiv", "SminusDiv", 1, HDiv, "contravariant Piola", (1, None), cubes[1:3]) +register_element("SminusCurl", "SminusCurl", 1, HCurl, "covariant Piola", (1, None), cubes[1:3]) +register_element("AAE", None, 1, HCurl, "covariant Piola", (1, None), ("hexahedron",)) +register_element("AAF", None, 1, HDiv, "contravariant Piola", (1, None), ("hexahedron",)) + +# New aliases introduced for the periodic table 2014 +register_alias("P", lambda family, dim, order, degree: ("Lagrange", order)) +register_alias("DP", lambda family, dim, order, + degree: ("Discontinuous Lagrange", order)) +register_alias("RTE", lambda family, dim, order, + degree: ("Nedelec 1st kind H(curl)", order)) +register_alias("RTF", lambda family, dim, order, + degree: ("Raviart-Thomas", order)) +register_alias("N1E", lambda family, dim, order, + degree: ("Nedelec 1st kind H(curl)", order)) +register_alias("N1F", lambda family, dim, order, degree: ("Raviart-Thomas", + order)) + +register_alias("BDME", lambda family, dim, order, + degree: ("Nedelec 2nd kind H(curl)", order)) +register_alias("BDMF", lambda family, dim, order, + degree: ("Brezzi-Douglas-Marini", order)) +register_alias("N2E", lambda family, dim, order, + degree: ("Nedelec 2nd kind H(curl)", order)) +register_alias("N2F", lambda 
family, dim, order, + degree: ("Brezzi-Douglas-Marini", order)) + +# discontinuous elements using l2 pullbacks +register_element("DPC L2", None, 0, L2, "L2 Piola", (1, None), cubes) +register_element("DQ L2", None, 0, L2, "L2 Piola", (0, None), cubes) +register_element("Gauss-Legendre L2", "GL L2", 0, L2, "L2 Piola", (0, None), + ("interval",)) +register_element("Discontinuous Lagrange L2", "DG L2", 0, L2, "L2 Piola", (0, None), + any_cell) # "DP" + +register_alias("DP L2", lambda family, dim, order, + degree: ("Discontinuous Lagrange L2", order)) + +register_alias("P- Lambda L2", lambda family, dim, order, + degree: feec_element_l2(family, dim, order, degree)) +register_alias("P Lambda L2", lambda family, dim, order, + degree: feec_element_l2(family, dim, order, degree)) +register_alias("Q- Lambda L2", lambda family, dim, order, + degree: feec_element_l2(family, dim, order, degree)) +register_alias("S Lambda L2", lambda family, dim, order, + degree: feec_element_l2(family, dim, order, degree)) + +register_alias("P- L2", lambda family, dim, order, + degree: feec_element_l2(family, dim, order, degree)) +register_alias("Q- L2", lambda family, dim, order, + degree: feec_element_l2(family, dim, order, degree)) + +# mimetic spectral elements - primal and dual complexs +register_element("Extended-Gauss-Legendre", "EGL", 0, H1, "identity", (2, None), + ("interval",)) +register_element("Extended-Gauss-Legendre Edge", "EGL-Edge", 0, L2, "identity", (1, None), + ("interval",)) +register_element("Extended-Gauss-Legendre Edge L2", "EGL-Edge L2", 0, L2, "L2 Piola", (1, None), + ("interval",)) +register_element("Gauss-Lobatto-Legendre Edge", "GLL-Edge", 0, L2, "identity", (0, None), + ("interval",)) +register_element("Gauss-Lobatto-Legendre Edge L2", "GLL-Edge L2", 0, L2, "L2 Piola", (0, None), + ("interval",)) + +# directly-defined serendipity elements ala Arbogast +# currently the theory is only really worked out for quads. +register_element("Direct Serendipity", "Sdirect", 0, H1, "physical", (1, None), + ("quadrilateral",)) +register_element("Direct Serendipity Full H(div)", "Sdirect H(div)", 1, HDiv, "physical", (1, None), + ("quadrilateral",)) +register_element("Direct Serendipity Reduced H(div)", "Sdirect H(div) red", 1, HDiv, "physical", (1, None), + ("quadrilateral",)) + + +# NOTE- the edge elements for primal mimetic spectral elements are accessed by using +# variant='mse' in the appropriate places + +def feec_element(family, n, r, k): + """Finite element exterior calculus notation. + + n = topological dimension of domain + r = polynomial order + k = form_degree + """ + # Note: We always map to edge elements in 2D, don't know how to + # differentiate otherwise? 
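+    #
+    # Illustrative example of the table below: on a triangle (n = 2), the
+    # trimmed family "P-" with form degree k = 1 resolves to the edge element
+    #
+    #   feec_element("P-", 2, r, 1)   # -> ("RTE", r)
+    #
+    # and "RTE" is itself an alias that canonical_element_description()
+    # resolves further, to "Nedelec 1st kind H(curl)".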
+ + # Mapping from (feec name, domain dimension, form degree) to + # (family name, polynomial order) + _feec_elements = { + "P- Lambda": ( + (("P", r), ("DP", r - 1)), + (("P", r), ("RTE", r), ("DP", r - 1)), + (("P", r), ("N1E", r), ("N1F", r), ("DP", r - 1)), + ), + "P Lambda": ( + (("P", r), ("DP", r)), + (("P", r), ("BDME", r), ("DP", r)), + (("P", r), ("N2E", r), ("N2F", r), ("DP", r)), + ), + "Q- Lambda": ( + (("Q", r), ("DQ", r - 1)), + (("Q", r), ("RTCE", r), ("DQ", r - 1)), + (("Q", r), ("NCE", r), ("NCF", r), ("DQ", r - 1)), + ), + "S Lambda": ( + (("S", r), ("DPC", r)), + (("S", r), ("BDMCE", r), ("DPC", r)), + (("S", r), ("AAE", r), ("AAF", r), ("DPC", r)), + ), + } + + # New notation, old verbose notation (including "Lambda") might be + # removed + _feec_elements["P-"] = _feec_elements["P- Lambda"] + _feec_elements["P"] = _feec_elements["P Lambda"] + _feec_elements["Q-"] = _feec_elements["Q- Lambda"] + _feec_elements["S"] = _feec_elements["S Lambda"] + + family, r = _feec_elements[family][n - 1][k] + + return family, r + + +def feec_element_l2(family, n, r, k): + """Finite element exterior calculus notation. + + n = topological dimension of domain + r = polynomial order + k = form_degree + """ + # Note: We always map to edge elements in 2D, don't know how to + # differentiate otherwise? + + # Mapping from (feec name, domain dimension, form degree) to + # (family name, polynomial order) + _feec_elements = { + "P- Lambda L2": ( + (("P", r), ("DP L2", r - 1)), + (("P", r), ("RTE", r), ("DP L2", r - 1)), + (("P", r), ("N1E", r), ("N1F", r), ("DP L2", r - 1)), + ), + "P Lambda L2": ( + (("P", r), ("DP L2", r)), + (("P", r), ("BDME", r), ("DP L2", r)), + (("P", r), ("N2E", r), ("N2F", r), ("DP L2", r)), + ), + "Q- Lambda L2": ( + (("Q", r), ("DQ L2", r - 1)), + (("Q", r), ("RTCE", r), ("DQ L2", r - 1)), + (("Q", r), ("NCE", r), ("NCF", r), ("DQ L2", r - 1)), + ), + "S Lambda L2": ( + (("S", r), ("DPC L2", r)), + (("S", r), ("BDMCE", r), ("DPC L2", r)), + (("S", r), ("AAE", r), ("AAF", r), ("DPC L2", r)), + ), + } + + # New notation, old verbose notation (including "Lambda") might be + # removed + _feec_elements["P- L2"] = _feec_elements["P- Lambda L2"] + _feec_elements["P L2"] = _feec_elements["P Lambda L2"] + _feec_elements["Q- L2"] = _feec_elements["Q- Lambda L2"] + _feec_elements["S L2"] = _feec_elements["S Lambda L2"] + + family, r = _feec_elements[family][n - 1][k] + + return family, r + + +# General FEEC notation, old verbose (can be removed) +register_alias("P- Lambda", lambda family, dim, order, + degree: feec_element(family, dim, order, degree)) +register_alias("P Lambda", lambda family, dim, order, + degree: feec_element(family, dim, order, degree)) +register_alias("Q- Lambda", lambda family, dim, order, + degree: feec_element(family, dim, order, degree)) +register_alias("S Lambda", lambda family, dim, order, + degree: feec_element(family, dim, order, degree)) + +# General FEEC notation, new compact notation +register_alias("P-", lambda family, dim, order, + degree: feec_element(family, dim, order, degree)) +register_alias("Q-", lambda family, dim, order, + degree: feec_element(family, dim, order, degree)) + + +def canonical_element_description(family, cell, order, form_degree): + """Given basic element information, return corresponding element information on canonical form. + + Input: family, cell, (polynomial) order, form_degree + Output: family (canonical), short_name (for printing), order, value shape, + reference value shape, sobolev_space. 
+ + This is used by the FiniteElement constructor to ved input + data against the element list and aliases defined in ufl. + """ + # Get domain dimensions + if cell is not None: + tdim = cell.topological_dimension() + if isinstance(cell, Cell): + cellname = cell.cellname() + else: + cellname = None + else: + tdim = None + cellname = None + + # Catch general FEEC notation "P" and "S" + if form_degree is not None and family in ("P", "S"): + family, order = feec_element(family, tdim, order, form_degree) + + if form_degree is not None and family in ("P L2", "S L2"): + family, order = feec_element_l2(family, tdim, order, form_degree) + + # Check whether this family is an alias for something else + while family in aliases: + if tdim is None: + raise ValueError("Need dimension to handle element aliases.") + (family, order) = aliases[family](family, tdim, order, form_degree) + + # Check that the element family exists + if family not in ufl_elements: + raise ValueError(f"Unknown finite element '{family}'.") + + # Check that element data is valid (and also get common family + # name) + (family, short_name, value_rank, sobolev_space, mapping, krange, cellnames) = ufl_elements[family] + + # Accept CG/DG on all kind of cells, but use Q/DQ on "product" cells + if cellname in set(cubes) - set(simplices) or isinstance(cell, TensorProductCell): + if family == "Lagrange": + family = "Q" + elif family == "Discontinuous Lagrange": + if order >= 1: + warnings.warn("Discontinuous Lagrange element requested on %s, creating DQ element." % cell.cellname()) + family = "DQ" + elif family == "Discontinuous Lagrange L2": + if order >= 1: + warnings.warn(f"Discontinuous Lagrange L2 element requested on {cell.cellname()}, " + "creating DQ L2 element.") + family = "DQ L2" + + # Validate cellname if a valid cell is specified + if not (cellname is None or cellname in cellnames): + raise ValueError(f"Cellname '{cellname}' invalid for '{family}' finite element.") + + # Validate order if specified + if order is not None: + if krange is None: + raise ValueError(f"Order {order} invalid for '{family}' finite element, should be None.") + kmin, kmax = krange + if not (kmin is None or (asarray(order) >= kmin).all()): + raise ValueError(f"Order {order} invalid for '{family}' finite element.") + if not (kmax is None or (asarray(order) <= kmax).all()): + raise ValueError(f"Order {istr(order)} invalid for '{family}' finite element.") + + if value_rank == 2: + # Tensor valued fundamental elements in HEin have this shape + if tdim is None: + raise ValueError("Cannot infer shape of element without topological and geometric dimensions.") + reference_value_shape = (tdim, tdim) + elif value_rank == 1: + # Vector valued fundamental elements in HDiv and HCurl have a shape + if tdim is None: + raise ValueError("Cannot infer shape of element without topological and geometric dimensions.") + reference_value_shape = (tdim,) + elif value_rank == 0: + # All other elements are scalar values + reference_value_shape = () + else: + raise ValueError(f"Invalid value rank {value_rank}.") + + embedded_degree = order + if any(bubble in family for bubble in ("Guzman-Neilan", "Bernardi-Raugel")): + embedded_degree = tdim + return family, short_name, order, reference_value_shape, sobolev_space, mapping, embedded_degree diff --git a/finat/ufl/enrichedelement.py b/finat/ufl/enrichedelement.py new file mode 100644 index 000000000..a054885bb --- /dev/null +++ b/finat/ufl/enrichedelement.py @@ -0,0 +1,154 @@ +"""This module defines the UFL finite element classes.""" 
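+#
+# Usage sketch (illustrative; assumes the ufl "triangle" cell): a classic
+# P1-plus-cubic-bubble enrichment can be written explicitly as
+#
+#   from finat.ufl import EnrichedElement, FiniteElement
+#   enriched = EnrichedElement(FiniteElement("Lagrange", "triangle", 1),
+#                              FiniteElement("Bubble", "triangle", 3))
+#
+# or via the "+" operator on FiniteElementBase, which builds the same
+# EnrichedElement.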
+ +# Copyright (C) 2008-2016 Martin Sandve Alnæs +# +# This file was originally part of UFL (https://www.fenicsproject.org) +# +# SPDX-License-Identifier: LGPL-3.0-or-later +# +# Modified by Kristian B. Oelgaard +# Modified by Marie E. Rognes 2010, 2012 +# Modified by Massimiliano Leoni, 2016 +# Modified by Matthew Scroggs, 2023 + +from finat.ufl.finiteelementbase import FiniteElementBase + + +class EnrichedElementBase(FiniteElementBase): + """The vector sum of several finite element spaces.""" + + def __init__(self, *elements): + """Doc.""" + self._elements = elements + + cell = elements[0].cell + if not all(e.cell == cell for e in elements[1:]): + raise ValueError("Cell mismatch for sub elements of enriched element.") + + if isinstance(elements[0].degree(), int): + degrees = {e.degree() for e in elements} - {None} + degree = max(degrees) if degrees else None + else: + degree = tuple(map(max, zip(*[e.degree() for e in elements]))) + + # We can allow the scheme not to be defined, but all defined + # should be equal + quad_schemes = [e.quadrature_scheme() for e in elements] + quad_schemes = [qs for qs in quad_schemes if qs is not None] + quad_scheme = quad_schemes[0] if quad_schemes else None + if not all(qs == quad_scheme for qs in quad_schemes): + raise ValueError("Quadrature scheme mismatch.") + + reference_value_shape = elements[0].reference_value_shape + if not all(e.reference_value_shape == reference_value_shape for e in elements[1:]): + raise ValueError("Element reference value shape mismatch.") + + # mapping = elements[0].mapping() # FIXME: This fails for a mixed subelement here. + # if not all(e.mapping() == mapping for e in elements[1:]): + # raise ValueError("Element mapping mismatch.") + + # Get name of subclass: EnrichedElement or NodalEnrichedElement + class_name = self.__class__.__name__ + + # Initialize element data + FiniteElementBase.__init__(self, class_name, cell, degree, + quad_scheme, reference_value_shape) + + def mapping(self): + """Doc.""" + return self._elements[0].mapping() + + @property + def sobolev_space(self): + """Return the underlying Sobolev space.""" + elements = [e for e in self._elements] + if all(e.sobolev_space == elements[0].sobolev_space + for e in elements): + return elements[0].sobolev_space + else: + # Find smallest shared Sobolev space over all sub elements + spaces = [e.sobolev_space for e in elements] + superspaces = [{s} | set(s.parents) for s in spaces] + intersect = set.intersection(*superspaces) + for s in intersect.copy(): + for parent in s.parents: + intersect.discard(parent) + + sobolev_space, = intersect + return sobolev_space + + def variant(self): + """Doc.""" + try: + variant, = {e.variant() for e in self._elements} + return variant + except ValueError: + return None + + def reconstruct(self, **kwargs): + """Doc.""" + return type(self)(*[e.reconstruct(**kwargs) for e in self._elements]) + + @property + def embedded_subdegree(self): + """Return embedded subdegree.""" + return min(e.embedded_subdegree for e in self._elements) + + @property + def embedded_superdegree(self): + """Return embedded superdegree.""" + return max(e.embedded_superdegree for e in self._elements) + + +class EnrichedElement(EnrichedElementBase): + r"""The vector sum of several finite element spaces. + + .. math:: \\textrm{EnrichedElement}(V, Q) = \\{v + q | v \\in V, q \\in Q\\}. + + Dual basis is a concatenation of subelements dual bases; + primal basis is a concatenation of subelements primal bases; + resulting element is not nodal even when subelements are. 
+ Structured basis may be exploited in form compilers. + """ + + def is_cellwise_constant(self): + """Return whether the basis functions of this element is spatially constant over each cell.""" + return all(e.is_cellwise_constant() for e in self._elements) + + def __repr__(self): + """Doc.""" + return "EnrichedElement(" + ", ".join(repr(e) for e in self._elements) + ")" + + def __str__(self): + """Format as string for pretty printing.""" + return "<%s>" % " + ".join(str(e) for e in self._elements) + + def shortstr(self): + """Format as string for pretty printing.""" + return "<%s>" % " + ".join(e.shortstr() for e in self._elements) + + +class NodalEnrichedElement(EnrichedElementBase): + r"""The vector sum of several finite element spaces. + + .. math:: \\textrm{EnrichedElement}(V, Q) = \\{v + q | v \\in V, q \\in Q\\}. + + Primal basis is reorthogonalized to dual basis which is + a concatenation of subelements dual bases; resulting + element is nodal. + """ + def is_cellwise_constant(self): + """Return whether the basis functions of this element is spatially constant over each cell.""" + return False + + def __repr__(self): + """Doc.""" + return "NodalEnrichedElement(" + ", ".join(repr(e) for e in self._elements) + ")" + + def __str__(self): + """Format as string for pretty printing.""" + return "" % ", ".join(str(e) for e in self._elements) + + def shortstr(self): + """Format as string for pretty printing.""" + return "NodalEnriched(%s)" % ", ".join(e.shortstr() for e in self._elements) diff --git a/finat/ufl/finiteelement.py b/finat/ufl/finiteelement.py new file mode 100644 index 000000000..4cf34c90d --- /dev/null +++ b/finat/ufl/finiteelement.py @@ -0,0 +1,257 @@ +"""This module defines the UFL finite element classes.""" +# Copyright (C) 2008-2016 Martin Sandve Alnæs +# +# This file was originally part of UFL (https://www.fenicsproject.org) +# +# SPDX-License-Identifier: LGPL-3.0-or-later +# +# Modified by Kristian B. Oelgaard +# Modified by Marie E. Rognes 2010, 2012 +# Modified by Anders Logg 2014 +# Modified by Massimiliano Leoni, 2016 +# Modified by Matthew Scroggs, 2023 + +from ufl.cell import TensorProductCell, as_cell +from finat.ufl.elementlist import canonical_element_description, simplices +from finat.ufl.finiteelementbase import FiniteElementBase +from ufl.utils.formatting import istr + + +class FiniteElement(FiniteElementBase): + """The basic finite element class for all simple finite elements.""" + # TODO: Move these to base? 
+ __slots__ = ("_short_name", "_sobolev_space", + "_mapping", "_variant", "_repr") + + def __new__(cls, + family, + cell=None, + degree=None, + form_degree=None, + quad_scheme=None, + variant=None): + """Intercepts construction to expand CG, DG, RTCE and RTCF spaces on TensorProductCells.""" + if cell is not None: + cell = as_cell(cell) + + if isinstance(cell, TensorProductCell): + # Delay import to avoid circular dependency at module load time + from finat.ufl.enrichedelement import EnrichedElement + from finat.ufl.hdivcurl import HCurlElement as HCurl + from finat.ufl.hdivcurl import HDivElement as HDiv + from finat.ufl.tensorproductelement import TensorProductElement + + family, short_name, degree, reference_value_shape, sobolev_space, mapping, embedded_degree = \ + canonical_element_description(family, cell, degree, form_degree) + + if family in ["RTCF", "RTCE"]: + cell_h, cell_v = cell.sub_cells() + if cell_h.cellname() != "interval": + raise ValueError(f"{family} is available on TensorProductCell(interval, interval) only.") + if cell_v.cellname() != "interval": + raise ValueError(f"{family} is available on TensorProductCell(interval, interval) only.") + + C_elt = FiniteElement("CG", "interval", degree, variant=variant) + D_elt = FiniteElement("DG", "interval", degree - 1, variant=variant) + + CxD_elt = TensorProductElement(C_elt, D_elt, cell=cell) + DxC_elt = TensorProductElement(D_elt, C_elt, cell=cell) + + if family == "RTCF": + return EnrichedElement(HDiv(CxD_elt), HDiv(DxC_elt)) + if family == "RTCE": + return EnrichedElement(HCurl(CxD_elt), HCurl(DxC_elt)) + + elif family == "NCF": + cell_h, cell_v = cell.sub_cells() + if cell_h.cellname() != "quadrilateral": + raise ValueError(f"{family} is available on TensorProductCell(quadrilateral, interval) only.") + if cell_v.cellname() != "interval": + raise ValueError(f"{family} is available on TensorProductCell(quadrilateral, interval) only.") + + Qc_elt = FiniteElement("RTCF", "quadrilateral", degree, variant=variant) + Qd_elt = FiniteElement("DQ", "quadrilateral", degree - 1, variant=variant) + + Id_elt = FiniteElement("DG", "interval", degree - 1, variant=variant) + Ic_elt = FiniteElement("CG", "interval", degree, variant=variant) + + return EnrichedElement(HDiv(TensorProductElement(Qc_elt, Id_elt, cell=cell)), + HDiv(TensorProductElement(Qd_elt, Ic_elt, cell=cell))) + + elif family == "NCE": + cell_h, cell_v = cell.sub_cells() + if cell_h.cellname() != "quadrilateral": + raise ValueError(f"{family} is available on TensorProductCell(quadrilateral, interval) only.") + if cell_v.cellname() != "interval": + raise ValueError(f"{family} is available on TensorProductCell(quadrilateral, interval) only.") + + Qc_elt = FiniteElement("Q", "quadrilateral", degree, variant=variant) + Qd_elt = FiniteElement("RTCE", "quadrilateral", degree, variant=variant) + + Id_elt = FiniteElement("DG", "interval", degree - 1, variant=variant) + Ic_elt = FiniteElement("CG", "interval", degree, variant=variant) + + return EnrichedElement(HCurl(TensorProductElement(Qc_elt, Id_elt, cell=cell)), + HCurl(TensorProductElement(Qd_elt, Ic_elt, cell=cell))) + + elif family == "Q": + return TensorProductElement(*[FiniteElement("CG", c, degree, variant=variant) + for c in cell.sub_cells()], + cell=cell) + + elif family == "DQ": + def dq_family(cell): + """Doc.""" + return "DG" if cell.cellname() in simplices else "DQ" + return TensorProductElement(*[FiniteElement(dq_family(c), c, degree, variant=variant) + for c in cell.sub_cells()], + cell=cell) + + elif family == "DQ 
L2": + def dq_family_l2(cell): + """Doc.""" + return "DG L2" if cell.cellname() in simplices else "DQ L2" + return TensorProductElement(*[FiniteElement(dq_family_l2(c), c, degree, variant=variant) + for c in cell.sub_cells()], + cell=cell) + + return super().__new__(cls) + + def __init__(self, + family, + cell=None, + degree=None, + form_degree=None, + quad_scheme=None, + variant=None): + """Create finite element. + + :arg family: The finite element family + :arg cell: The geometric cell + :arg degree: The polynomial degree (optional) + :arg form_degree: The form degree (FEEC notation, used when field is + viewed as k-form) + :arg quad_scheme: The quadrature scheme (optional) + :arg variant: Hint for the local basis function variant (optional) + + """ + # Note: Unfortunately, dolfin sometimes passes None for + # cell. Until this is fixed, allow it: + if cell is not None: + cell = as_cell(cell) + + ( + family, short_name, degree, reference_value_shape, sobolev_space, mapping, embedded_degree + ) = canonical_element_description(family, cell, degree, form_degree) + + # TODO: Move these to base? Might be better to instead + # simplify base though. + self._sobolev_space = sobolev_space + self._mapping = mapping + self._short_name = short_name or family + self._variant = variant + self._embedded_degree = embedded_degree + + # Type check variant + if variant is not None and not isinstance(variant, str): + raise ValueError("Illegal variant: must be string or None") + + # Initialize element data + FiniteElementBase.__init__(self, family, cell, degree, quad_scheme, + reference_value_shape) + + # Cache repr string + qs = self.quadrature_scheme() + if qs is None: + quad_str = "" + else: + quad_str = ", quad_scheme=%s" % repr(qs) + v = self.variant() + if v is None: + var_str = "" + else: + var_str = ", variant=%s" % repr(v) + self._repr = "FiniteElement(%s, %s, %s%s%s)" % ( + repr(self.family()), repr(self.cell), repr(self.degree()), quad_str, var_str) + assert '"' not in self._repr + + def __repr__(self): + """Format as string for evaluation as Python object.""" + return self._repr + + def _is_globally_constant(self): + """Doc.""" + return self.family() == "Real" + + def _is_linear(self): + """Doc.""" + return self.family() == "Lagrange" and self.degree() == 1 + + def mapping(self): + """Return the mapping type for this element .""" + return self._mapping + + @property + def sobolev_space(self): + """Return the underlying Sobolev space.""" + return self._sobolev_space + + def variant(self): + """Return the variant used to initialise the element.""" + return self._variant + + def reconstruct(self, family=None, cell=None, degree=None, quad_scheme=None, variant=None): + """Construct a new FiniteElement object with some properties replaced with new values.""" + if family is None: + family = self.family() + if cell is None: + cell = self.cell + if degree is None: + degree = self.degree() + if quad_scheme is None: + quad_scheme = self.quadrature_scheme() + if variant is None: + variant = self.variant() + return FiniteElement(family, cell, degree, quad_scheme=quad_scheme, variant=variant) + + def __str__(self): + """Format as string for pretty printing.""" + qs = self.quadrature_scheme() + qs = "" if qs is None else "(%s)" % qs + v = self.variant() + v = "" if v is None else "(%s)" % v + return "<%s%s%s%s on a %s>" % (self._short_name, istr(self.degree()), + qs, v, self.cell) + + def shortstr(self): + """Format as string for pretty printing.""" + return 
f"{self._short_name}{istr(self.degree())}({self.quadrature_scheme()},{istr(self.variant())})" + + def __getnewargs__(self): + """Return the arguments which pickle needs to recreate the object.""" + return (self.family(), + self.cell, + self.degree(), + None, + self.quadrature_scheme(), + self.variant()) + + @property + def embedded_subdegree(self): + """Return embedded subdegree.""" + subdegree = self.degree() + if not isinstance(subdegree, int): + subdegree = min(subdegree) + if isinstance(self._embedded_degree, int): + subdegree = min(subdegree, self._embedded_degree) + return subdegree + + @property + def embedded_superdegree(self): + """Return embedded superdegree.""" + superdegree = self.degree() + if not isinstance(superdegree, int): + superdegree = max(superdegree) + if isinstance(self._embedded_degree, int): + superdegree = max(superdegree, self._embedded_degree) + return superdegree diff --git a/finat/ufl/finiteelementbase.py b/finat/ufl/finiteelementbase.py new file mode 100644 index 000000000..3a84f908e --- /dev/null +++ b/finat/ufl/finiteelementbase.py @@ -0,0 +1,268 @@ +"""This module defines the UFL finite element classes.""" + +# Copyright (C) 2008-2016 Martin Sandve Alnæs +# +# This file was originally part of UFL (https://www.fenicsproject.org) +# +# SPDX-License-Identifier: LGPL-3.0-or-later +# +# Modified by Kristian B. Oelgaard +# Modified by Marie E. Rognes 2010, 2012 +# Modified by Massimiliano Leoni, 2016 +# Modified by Matthew Scroggs, 2023 + +from abc import abstractmethod, abstractproperty +from hashlib import md5 + +from ufl import pullback +from ufl.cell import AbstractCell, as_cell +from ufl.finiteelement import AbstractFiniteElement +from ufl.utils.sequences import product + + +class FiniteElementBase(AbstractFiniteElement): + """Base class for all finite elements.""" + __slots__ = ("_family", "_cell", "_degree", "_quad_scheme", + "_reference_value_shape") + + # TODO: Not all these should be in the base class! In particular + # family, degree, and quad_scheme do not belong here. + def __init__(self, family, cell, degree, quad_scheme, + reference_value_shape): + """Initialize basic finite element data.""" + if not (degree is None or isinstance(degree, (int, tuple))): + raise ValueError("Invalid degree type.") + if not isinstance(reference_value_shape, tuple): + raise ValueError("Invalid reference_value_shape type.") + + if cell is not None: + cell = as_cell(cell) + if not isinstance(cell, AbstractCell): + raise ValueError("Invalid cell type.") + + self._family = family + self._cell = cell + self._degree = degree + self._reference_value_shape = reference_value_shape + self._quad_scheme = quad_scheme + + @abstractmethod + def __repr__(self): + """Format as string for evaluation as Python object.""" + pass + + @abstractproperty + def sobolev_space(self): + """Return the underlying Sobolev space.""" + pass + + @abstractmethod + def mapping(self): + """Return the mapping type for this element.""" + pass + + def _is_globally_constant(self): + """Check if the element is a global constant. + + For Real elements, this should return True. 
+ """ + return False + + def _is_linear(self): + """Check if the element is Lagrange degree 1.""" + return False + + def _ufl_hash_data_(self): + """Doc.""" + return repr(self) + + def _ufl_signature_data_(self): + """Doc.""" + return repr(self) + + def __hash__(self): + """Compute hash code for insertion in hashmaps.""" + return int.from_bytes(md5(self._ufl_hash_data_().encode()).digest(), byteorder='big') + + def __eq__(self, other): + """Compute element equality for insertion in hashmaps.""" + return type(self) is type(other) and self._ufl_hash_data_() == other._ufl_hash_data_() + + def __ne__(self, other): + """Compute element inequality for insertion in hashmaps.""" + return not self.__eq__(other) + + def __lt__(self, other): + """Compare elements by repr, to give a natural stable sorting.""" + return repr(self) < repr(other) + + def family(self): # FIXME: Undefined for base? + """Return finite element family.""" + return self._family + + def variant(self): + """Return the variant used to initialise the element.""" + return None + + def degree(self, component=None): + """Return polynomial degree of finite element.""" + return self._degree + + def quadrature_scheme(self): + """Return quadrature scheme of finite element.""" + return self._quad_scheme + + @property + def cell(self): + """Return cell of finite element.""" + return self._cell + + def is_cellwise_constant(self, component=None): + """Return whether the basis functions of this element is spatially constant over each cell.""" + return self._is_globally_constant() or self.degree() == 0 + + @property + def reference_value_shape(self): + """Return the shape of the value space on the reference cell.""" + return self._reference_value_shape + + @property + def reference_value_size(self): + """Return the integer product of the reference value shape.""" + return product(self.reference_value_shape) + + def symmetry(self): # FIXME: different approach + r"""Return the symmetry dict. + + This is a mapping :math:`c_0 \\to c_1` + meaning that component :math:`c_0` is represented by component + :math:`c_1`. + A component is a tuple of one or more ints. + """ + return {} + + def _check_component(self, domain, i): + """Check that component index i is valid.""" + sh = self.value_shape(domain.geometric_dimension()) + r = len(sh) + if not (len(i) == r and all(j < k for (j, k) in zip(i, sh))): + raise ValueError( + f"Illegal component index {i} (value rank {len(i)}) " + f"for element (value rank {r}).") + + def extract_subelement_component(self, domain, i): + """Extract direct subelement index and subelement relative component index for a given component index.""" + if isinstance(i, int): + i = (i,) + self._check_component(domain, i) + return (None, i) + + def extract_component(self, domain, i): + """Recursively extract component index relative to a (simple) element. + + and that element for given value component index. + """ + if isinstance(i, int): + i = (i,) + self._check_component(domain, i) + return (i, self) + + def _check_reference_component(self, i): + """Check that reference component index i is valid.""" + sh = self.reference_value_shape + r = len(sh) + if not (len(i) == r and all(j < k for (j, k) in zip(i, sh))): + raise ValueError( + f"Illegal component index {i} (value rank {len(i)}) " + f"for element (value rank {r}).") + + def extract_subelement_reference_component(self, i): + """Extract direct subelement index and subelement relative. + + reference component index for a given reference component index. 
+ """ + if isinstance(i, int): + i = (i,) + self._check_reference_component(i) + return (None, i) + + def extract_reference_component(self, i): + """Recursively extract reference component index relative to a (simple) element. + + and that element for given reference value component index. + """ + if isinstance(i, int): + i = (i,) + self._check_reference_component(i) + return (i, self) + + @property + def num_sub_elements(self): + """Return number of sub-elements.""" + return 0 + + @property + def sub_elements(self): + """Return list of sub-elements.""" + return [] + + def __add__(self, other): + """Add two elements, creating an enriched element.""" + if not isinstance(other, FiniteElementBase): + raise ValueError(f"Can't add element and {other.__class__}.") + from finat.ufl import EnrichedElement + return EnrichedElement(self, other) + + def __mul__(self, other): + """Multiply two elements, creating a mixed element.""" + if not isinstance(other, FiniteElementBase): + raise ValueError("Can't multiply element and {other.__class__}.") + from finat.ufl import MixedElement + return MixedElement(self, other) + + def __getitem__(self, index): + """Restrict finite element to a subdomain, subcomponent or topology (cell).""" + from finat.ufl.restrictedelement import valid_restriction_domains + if index in valid_restriction_domains: + from finat.ufl import RestrictedElement + return RestrictedElement(self, index) + else: + raise KeyError(f"Invalid index for restriction: {repr(index)}") + + def __iter__(self): + """Iter.""" + raise TypeError(f"'{type(self).__name__}' object is not iterable") + + @property + def embedded_superdegree(self): + """Doc.""" + return self.degree() + + @property + def embedded_subdegree(self): + """Doc.""" + return self.degree() + + @property + def pullback(self): + """Get the pull back.""" + if self.mapping() == "identity": + return pullback.identity_pullback + elif self.mapping() == "L2 Piola": + return pullback.l2_piola + elif self.mapping() == "covariant Piola": + return pullback.covariant_piola + elif self.mapping() == "contravariant Piola": + return pullback.contravariant_piola + elif self.mapping() == "double covariant Piola": + return pullback.double_covariant_piola + elif self.mapping() == "double contravariant Piola": + return pullback.double_contravariant_piola + elif self.mapping() == "covariant contravariant Piola": + return pullback.covariant_contravariant_piola + elif self.mapping() == "custom": + return pullback.custom_pullback + elif self.mapping() == "physical": + return pullback.physical_pullback + + raise ValueError(f"Unsupported mapping: {self.mapping()}") diff --git a/finat/ufl/hdivcurl.py b/finat/ufl/hdivcurl.py new file mode 100644 index 000000000..ed188038e --- /dev/null +++ b/finat/ufl/hdivcurl.py @@ -0,0 +1,244 @@ +"""Doc.""" +# Copyright (C) 2008-2016 Andrew T. T. 
McRae +# +# This file was originally part of UFL (https://www.fenicsproject.org) +# +# SPDX-License-Identifier: LGPL-3.0-or-later +# +# Modified by Massimiliano Leoni, 2016 +# Modified by Matthew Scroggs, 2023 + +from finat.ufl.finiteelementbase import FiniteElementBase +from ufl.sobolevspace import L2, SobolevSpace +from ufl.sobolevspace import HCurl as HCurlSobolevSpace +from ufl.sobolevspace import HDiv as HDivSobolevSpace + + +class CallableSobolevSpace(SobolevSpace): + """A Sobolev space that can be called to create HDiv and HCurl elements.""" + + def __init__(self, name, parents=None): + super().__init__(name, parents) + + def __call__(self, element): + """Syntax shortcut to create a HDivElement or HCurlElement.""" + if self.name == "HDiv": + return HDivElement(element) + elif self.name == "HCurl": + return HCurlElement(element) + raise NotImplementedError( + "SobolevSpace has no call operator (only the specific HDiv and HCurl instances)." + ) + + +HCurl = CallableSobolevSpace(HCurlSobolevSpace.name, HCurlSobolevSpace.parents) +HDiv = CallableSobolevSpace(HDivSobolevSpace.name, HDivSobolevSpace.parents) + + +class HDivElement(FiniteElementBase): + """A div-conforming version of an outer product element, assuming this makes mathematical sense.""" + __slots__ = ("_element", ) + + def __init__(self, element): + """Doc.""" + self._element = element + + family = "TensorProductElement" + cell = element.cell + degree = element.degree() + quad_scheme = element.quadrature_scheme() + reference_value_shape = (element.cell.topological_dimension(),) + + # Skipping TensorProductElement constructor! Bad code smell, refactor to avoid this non-inheritance somehow. + FiniteElementBase.__init__(self, family, cell, degree, + quad_scheme, reference_value_shape) + + def __repr__(self): + """Doc.""" + return f"HDivElement({repr(self._element)})" + + def mapping(self): + """Doc.""" + return "contravariant Piola" + + @property + def sobolev_space(self): + """Return the underlying Sobolev space.""" + return HDivSobolevSpace + + def reconstruct(self, **kwargs): + """Doc.""" + return HDivElement(self._element.reconstruct(**kwargs)) + + def variant(self): + """Doc.""" + return self._element.variant() + + def __str__(self): + """Doc.""" + return f"HDivElement({repr(self._element)})" + + def shortstr(self): + """Format as string for pretty printing.""" + return f"HDivElement({self._element.shortstr()})" + + @property + def embedded_subdegree(self): + """Return embedded subdegree.""" + return self._element.embedded_subdegree + + @property + def embedded_superdegree(self): + """Return embedded superdegree.""" + return self._element.embedded_superdegree + + +class HCurlElement(FiniteElementBase): + """A curl-conforming version of an outer product element, assuming this makes mathematical sense.""" + __slots__ = ("_element",) + + def __init__(self, element): + """Doc.""" + self._element = element + + family = "TensorProductElement" + cell = element.cell + degree = element.degree() + quad_scheme = element.quadrature_scheme() + cell = element.cell + reference_value_shape = (cell.topological_dimension(),) # TODO: Is this right? + # Skipping TensorProductElement constructor! Bad code smell, + # refactor to avoid this non-inheritance somehow. 
+ FiniteElementBase.__init__(self, family, cell, degree, quad_scheme, + reference_value_shape) + + def __repr__(self): + """Doc.""" + return f"HCurlElement({repr(self._element)})" + + def mapping(self): + """Doc.""" + return "covariant Piola" + + @property + def sobolev_space(self): + """Return the underlying Sobolev space.""" + return HCurlSobolevSpace + + def reconstruct(self, **kwargs): + """Doc.""" + return HCurlElement(self._element.reconstruct(**kwargs)) + + def variant(self): + """Doc.""" + return self._element.variant() + + def __str__(self): + """Doc.""" + return f"HCurlElement({repr(self._element)})" + + def shortstr(self): + """Format as string for pretty printing.""" + return f"HCurlElement({self._element.shortstr()})" + + @property + def embedded_subdegree(self): + """Return embedded subdegree.""" + return self._element.embedded_subdegree + + @property + def embedded_superdegree(self): + """Return embedded superdegree.""" + return self._element.embedded_superdegree + + +class WithMapping(FiniteElementBase): + """Specify an alternative mapping for the wrappee. + + For example, + to use identity mapping instead of Piola map with an element E, + write + remapped = WithMapping(E, "identity") + """ + + def __init__(self, wrapee, mapping): + """Doc.""" + if mapping == "symmetries": + raise ValueError("Can't change mapping to 'symmetries'") + self._mapping = mapping + self.wrapee = wrapee + + def __getattr__(self, attr): + """Doc.""" + try: + return getattr(self.wrapee, attr) + except AttributeError: + raise AttributeError("'%s' object has no attribute '%s'" % + (type(self).__name__, attr)) + + def __repr__(self): + """Doc.""" + return f"WithMapping({repr(self.wrapee)}, '{self._mapping}')" + + def value_shape(self, domain): + """Doc.""" + gdim = domain.geometric_dimension() + mapping = self.mapping() + if mapping in {"covariant Piola", "contravariant Piola"}: + return (gdim,) + elif mapping in {"double covariant Piola", "double contravariant Piola"}: + return (gdim, gdim) + else: + return self.wrapee.value_shape(domain) + + @property + def reference_value_shape(self): + """Doc.""" + tdim = self.cell.topological_dimension() + mapping = self.mapping() + if mapping in {"covariant Piola", "contravariant Piola"}: + return (tdim,) + elif mapping in {"double covariant Piola", "double contravariant Piola"}: + return (tdim, tdim) + else: + return self.wrapee.reference_value_shape + + def mapping(self): + """Doc.""" + return self._mapping + + @property + def sobolev_space(self): + """Return the underlying Sobolev space.""" + if self.wrapee.mapping() == self.mapping(): + return self.wrapee.sobolev_space + else: + return L2 + + def reconstruct(self, **kwargs): + """Doc.""" + mapping = kwargs.pop("mapping", self._mapping) + wrapee = self.wrapee.reconstruct(**kwargs) + return type(self)(wrapee, mapping) + + def variant(self): + """Doc.""" + return self.wrapee.variant() + + def __str__(self): + """Doc.""" + return f"WithMapping({repr(self.wrapee)}, {self._mapping})" + + def shortstr(self): + """Doc.""" + return f"WithMapping({self.wrapee.shortstr()}, {self._mapping})" + + @property + def embedded_subdegree(self): + """Return embedded subdegree.""" + return self._element.embedded_subdegree + + @property + def embedded_superdegree(self): + """Return embedded superdegree.""" + return self._element.embedded_superdegree diff --git a/finat/ufl/mixedelement.py b/finat/ufl/mixedelement.py new file mode 100644 index 000000000..e1dd1ccd5 --- /dev/null +++ b/finat/ufl/mixedelement.py @@ -0,0 +1,543 @@ 
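+# Usage sketch (illustrative; assumes the ufl "triangle" cell): the classes in
+# this module compose scalar elements into vector-, tensor- and mixed-valued
+# spaces, e.g. a Taylor-Hood-style pairing
+#
+#   from finat.ufl import FiniteElement, MixedElement, VectorElement
+#   V = VectorElement("Lagrange", "triangle", 2)   # one P2 per component
+#   Q = FiniteElement("Lagrange", "triangle", 1)
+#   W = MixedElement(V, Q)                         # or equivalently V * Q
+#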
+"""This module defines the UFL finite element classes.""" +# Copyright (C) 2008-2016 Martin Sandve Alnæs +# +# This file was originally part of UFL (https://www.fenicsproject.org) +# +# SPDX-License-Identifier: LGPL-3.0-or-later +# +# Modified by Kristian B. Oelgaard +# Modified by Marie E. Rognes 2010, 2012 +# Modified by Anders Logg 2014 +# Modified by Massimiliano Leoni, 2016 +# Modified by Matthew Scroggs, 2023 + +import numpy as np + +from ufl.cell import as_cell +from finat.ufl.finiteelement import FiniteElement +from finat.ufl.finiteelementbase import FiniteElementBase +from ufl.permutation import compute_indices +from ufl.pullback import MixedPullback, SymmetricPullback +from ufl.utils.indexflattening import flatten_multiindex, shape_to_strides, unflatten_index +from ufl.utils.sequences import max_degree, product + + +class MixedElement(FiniteElementBase): + """A finite element composed of a nested hierarchy of mixed or simple elements.""" + __slots__ = ("_sub_elements", "_cells") + + def __init__(self, *elements, **kwargs): + """Create mixed finite element from given list of elements.""" + if type(self) is MixedElement: + if kwargs: + raise ValueError("Not expecting keyword arguments to MixedElement constructor.") + + # Un-nest arguments if we get a single argument with a list of elements + if len(elements) == 1 and isinstance(elements[0], (tuple, list)): + elements = elements[0] + # Interpret nested tuples as sub-mixedelements recursively + elements = [MixedElement(e) if isinstance(e, (tuple, list)) else e + for e in elements] + self._sub_elements = elements + + # Pick the first cell, for now all should be equal + cells = tuple(sorted(set(element.cell for element in elements) - set([None]))) + self._cells = cells + if cells: + cell = cells[0] + # Require that all elements are defined on the same cell + if not all(c == cell for c in cells[1:]): + raise ValueError("Sub elements must live on the same cell.") + else: + cell = None + + # Check that all elements use the same quadrature scheme TODO: + # We can allow the scheme not to be defined. + if len(elements) == 0: + quad_scheme = None + else: + quad_scheme = elements[0].quadrature_scheme() + if not all(e.quadrature_scheme() == quad_scheme for e in elements): + raise ValueError("Quadrature scheme mismatch for sub elements of mixed element.") + + # Compute value sizes in global and reference configurations + reference_value_size_sum = sum(product(s.reference_value_shape) for s in self._sub_elements) + + # Default reference value shape: Treated simply as all + # subelement reference values unpacked in a vector. + reference_value_shape = kwargs.get('reference_value_shape', (reference_value_size_sum,)) + + # Initialize element data + degrees = {e.degree() for e in self._sub_elements} - {None} + degree = max_degree(degrees) if degrees else None + FiniteElementBase.__init__(self, "Mixed", cell, degree, quad_scheme, + reference_value_shape) + + def __repr__(self): + """Doc.""" + return "MixedElement(" + ", ".join(repr(e) for e in self._sub_elements) + ")" + + def _is_linear(self): + """Doc.""" + return all(i._is_linear() for i in self._sub_elements) + + def reconstruct_from_elements(self, *elements): + """Reconstruct a mixed element from new subelements.""" + if all(a == b for (a, b) in zip(elements, self._sub_elements)): + return self + return MixedElement(*elements) + + def symmetry(self, domain): + r"""Return the symmetry dict, which is a mapping :math:`c_0 \\to c_1`. 
+ + meaning that component :math:`c_0` is represented by component + :math:`c_1`. + A component is a tuple of one or more ints. + """ + # Build symmetry map from symmetries of subelements + sm = {} + # Base index of the current subelement into mixed value + j = 0 + for e in self._sub_elements: + sh = e.value_shape(domain) + st = shape_to_strides(sh) + # Map symmetries of subelement into index space of this + # element + for c0, c1 in e.symmetry().items(): + j0 = flatten_multiindex(c0, st) + j + j1 = flatten_multiindex(c1, st) + j + sm[(j0,)] = (j1,) + # Update base index for next element + j += product(sh) + if j != product(self.value_shape(domain)): + raise ValueError("Size mismatch in symmetry algorithm.") + return sm or {} + + @property + def sobolev_space(self): + """Doc.""" + return max(e.sobolev_space for e in self._sub_elements) + + def mapping(self): + """Doc.""" + if all(e.mapping() == "identity" for e in self._sub_elements): + return "identity" + else: + return "undefined" + + @property + def num_sub_elements(self): + """Return number of sub elements.""" + return len(self._sub_elements) + + @property + def sub_elements(self): + """Return list of sub elements.""" + return self._sub_elements + + def extract_subelement_component(self, domain, i): + """Extract direct subelement index and subelement relative. + + component index for a given component index. + """ + if isinstance(i, int): + i = (i,) + self._check_component(i) + + # Select between indexing modes + if len(self.value_shape(domain)) == 1: + # Indexing into a long vector of flattened subelement + # shapes + j, = i + + # Find subelement for this index + for sub_element_index, e in enumerate(self._sub_elements): + sh = e.value_shape(domain) + si = product(sh) + if j < si: + break + j -= si + if j < 0: + raise ValueError("Moved past last value component!") + + # Convert index into a shape tuple + st = shape_to_strides(sh) + component = unflatten_index(j, st) + else: + # Indexing into a multidimensional tensor where subelement + # index is first axis + sub_element_index = i[0] + if sub_element_index >= len(self._sub_elements): + raise ValueError(f"Illegal component index (dimension {sub_element_index}).") + component = i[1:] + return (sub_element_index, component) + + def extract_component(self, i): + """Recursively extract component index relative to a (simple) element. + + and that element for given value component index. + """ + sub_element_index, component = self.extract_subelement_component(i) + return self._sub_elements[sub_element_index].extract_component(component) + + def extract_subelement_reference_component(self, i): + """Extract direct subelement index and subelement relative. + + reference_component index for a given reference_component index. + """ + if isinstance(i, int): + i = (i,) + self._check_reference_component(i) + + # Select between indexing modes + assert len(self.reference_value_shape) == 1 + # Indexing into a long vector of flattened subelement shapes + j, = i + + # Find subelement for this index + for sub_element_index, e in enumerate(self._sub_elements): + sh = e.reference_value_shape + si = product(sh) + if j < si: + break + j -= si + if j < 0: + raise ValueError("Moved past last value reference_component!") + + # Convert index into a shape tuple + st = shape_to_strides(sh) + reference_component = unflatten_index(j, st) + return (sub_element_index, reference_component) + + def extract_reference_component(self, i): + """Recursively extract reference_component index relative to a (simple) element. 
+
+        and that element for given value reference_component index.
+        """
+        sub_element_index, reference_component = self.extract_subelement_reference_component(i)
+        return self._sub_elements[sub_element_index].extract_reference_component(reference_component)
+
+    def is_cellwise_constant(self, component=None):
+        """Return whether the basis functions of this element is spatially constant over each cell."""
+        if component is None:
+            return all(e.is_cellwise_constant() for e in self.sub_elements)
+        else:
+            i, e = self.extract_component(component)
+            return e.is_cellwise_constant()
+
+    def degree(self, component=None):
+        """Return polynomial degree of finite element."""
+        if component is None:
+            return self._degree  # from FiniteElementBase, computed as max of subelements in __init__
+        else:
+            i, e = self.extract_component(component)
+            return e.degree()
+
+    @property
+    def embedded_subdegree(self):
+        """Return embedded subdegree."""
+        return min(e.embedded_subdegree for e in self.sub_elements)
+
+    @property
+    def embedded_superdegree(self):
+        """Return embedded superdegree."""
+        return max(e.embedded_superdegree for e in self.sub_elements)
+
+    def reconstruct(self, **kwargs):
+        """Doc."""
+        return MixedElement(*[e.reconstruct(**kwargs) for e in self.sub_elements])
+
+    def variant(self):
+        """Doc."""
+        try:
+            variant, = {e.variant() for e in self.sub_elements}
+            return variant
+        except ValueError:
+            return None
+
+    def __str__(self):
+        """Format as string for pretty printing."""
+        tmp = ", ".join(str(element) for element in self._sub_elements)
+        return "<Mixed element: (" + tmp + ")>"
+
+    def shortstr(self):
+        """Format as string for pretty printing."""
+        tmp = ", ".join(element.shortstr() for element in self._sub_elements)
+        return "Mixed<" + tmp + ">"
+
+    @property
+    def pullback(self):
+        """Get the pull back."""
+        return MixedPullback(self)
+
+
+class VectorElement(MixedElement):
+    """A special case of a mixed finite element where all elements are equal."""
+
+    __slots__ = ("_repr", "_mapping", "_sub_element")
+
+    def __init__(self, family, cell=None, degree=None, dim=None,
+                 form_degree=None, quad_scheme=None, variant=None):
+        """Create vector element (repeated mixed element)."""
+        if isinstance(family, FiniteElementBase):
+            sub_element = family
+            cell = sub_element.cell
+            variant = sub_element.variant()
+        else:
+            if cell is not None:
+                cell = as_cell(cell)
+            # Create sub element
+            sub_element = FiniteElement(family, cell, degree,
+                                        form_degree=form_degree,
+                                        quad_scheme=quad_scheme,
+                                        variant=variant)
+
+        # Set default size if not specified
+        if dim is None:
+            if cell is None:
+                raise ValueError("Cannot infer vector dimension without a cell.")
+            # TODO: is this the right default
+            dim = cell.topological_dimension()
+
+        self._mapping = sub_element.mapping()
+        # Create list of sub elements for mixed element constructor
+        sub_elements = [sub_element] * dim
+
+        # Compute value shapes
+        reference_value_shape = (dim,) + sub_element.reference_value_shape
+
+        # Initialize element data
+        MixedElement.__init__(self, sub_elements,
+                              reference_value_shape=reference_value_shape)
+
+        FiniteElementBase.__init__(self, sub_element.family(), sub_element.cell, sub_element.degree(),
+                                   sub_element.quadrature_scheme(), reference_value_shape)
+
+        self._sub_element = sub_element
+
+        if variant is None:
+            var_str = ""
+        else:
+            var_str = ", variant='" + variant + "'"
+
+        # Cache repr string
+        self._repr = f"VectorElement({repr(sub_element)}, dim={dim}{var_str})"
+
+    def __repr__(self):
+        """Doc."""
+        return self._repr
+
+    def reconstruct(self, **kwargs):
+        """Doc."""
+        sub_element = self._sub_element.reconstruct(**kwargs)
+        return VectorElement(sub_element, dim=len(self.sub_elements))
+
+    def variant(self):
+        """Return the variant used to initialise the element."""
+        return self._sub_element.variant()
+
+    def mapping(self):
+        """Doc."""
+        return self._mapping
+
+    def __str__(self):
+        """Format as string for pretty printing."""
+        return ("<vector element with %d components of %s>" %
+                (len(self._sub_elements), self._sub_element))
+
+    def shortstr(self):
+        """Format as string for pretty printing."""
+        return "Vector<%d x %s>" % (len(self._sub_elements),
+                                    self._sub_element.shortstr())
+
+    @property
+    def pullback(self):
+        """Get the pull back."""
+        return self._sub_element.pullback
+
+
+class TensorElement(MixedElement):
+    """A special case of a mixed finite element where all elements are equal."""
+    __slots__ = ("_sub_element", "_shape", "_symmetry",
+                 "_sub_element_mapping",
+                 "_flattened_sub_element_mapping",
+                 "_mapping", "_repr")
+
+    def __init__(self, family, cell=None, degree=None, shape=None,
+                 symmetry=None, quad_scheme=None, variant=None):
+        """Create tensor element (repeated mixed element with optional symmetries)."""
+        if isinstance(family, FiniteElementBase):
+            sub_element = family
+            cell = sub_element.cell
+            variant = sub_element.variant()
+        else:
+            if cell is not None:
+                cell = as_cell(cell)
+            # Create scalar sub element
+            sub_element = FiniteElement(family, cell, degree, quad_scheme=quad_scheme,
+                                        variant=variant)
+
+        # Set default shape if not specified
+        if shape is None:
+            if cell is None:
+                raise ValueError("Cannot infer tensor shape without a cell.")
+            # TODO: is this the right default
+            dim = cell.topological_dimension()
+            shape = (dim, dim)
+
+        if symmetry is None:
+            symmetry = {}
+        elif symmetry is True:
+            # Construct default symmetry dict for matrix elements
+            if not (len(shape) == 2 and shape[0] == shape[1]):
+                raise ValueError("Cannot set automatic symmetry for non-square tensor.")
+            symmetry = dict(((i, j), (j, i)) for i in range(shape[0])
+                            for j in range(shape[1]) if i > j)
+        else:
+            if not isinstance(symmetry, dict):
+                raise ValueError("Expecting symmetry to be None (unset), True, or dict.")
+
+            # Validate indices in symmetry dict
+            for i, j in symmetry.items():
+                if len(i) != len(j):
+                    raise ValueError("Non-matching length of symmetry index tuples.")
+                for k in range(len(i)):
+                    if not (i[k] >= 0 and j[k] >= 0 and i[k] < shape[k] and j[k] < shape[k]):
+                        raise ValueError("Symmetry dimensions out of bounds.")
+
+        # Compute all index combinations for given shape
+        indices = compute_indices(shape)
+
+        # Compute mapping from indices to sub element number,
+        # accounting for symmetry
+        sub_elements = []
+        sub_element_mapping = {}
+        for index in indices:
+            if index in symmetry:
+                continue
+            sub_element_mapping[index] = len(sub_elements)
+            sub_elements += [sub_element]
+
+        # Update mapping for symmetry
+        for index in indices:
+            if index in symmetry:
+                sub_element_mapping[index] = sub_element_mapping[symmetry[index]]
+        flattened_sub_element_mapping = [sub_element_mapping[index] for i,
+                                         index in enumerate(indices)]
+
+        # Compute reference value shape based on symmetries
+        if symmetry:
+            reference_value_shape = (product(shape) - len(symmetry),)
+            self._mapping = "symmetries"
+        else:
+            reference_value_shape = shape
+            self._mapping = sub_element.mapping()
+
+        reference_value_shape = reference_value_shape + sub_element.reference_value_shape
+        # Initialize element data
+        MixedElement.__init__(self, sub_elements,
+                              reference_value_shape=reference_value_shape)
+        self._family = sub_element.family()
+        self._degree = sub_element.degree()
+        self._sub_element = sub_element
+        self._shape = shape
+        self._symmetry = symmetry
+        self._sub_element_mapping = sub_element_mapping
+        self._flattened_sub_element_mapping = flattened_sub_element_mapping
+
+        if variant is None:
+            var_str = ""
+        else:
+            var_str = ", variant='" + variant + "'"
+
+        # Cache repr string
+        self._repr = (f"TensorElement({repr(sub_element)}, shape={shape}, "
+                      f"symmetry={symmetry}{var_str})")
+
+    @property
+    def pullback(self):
+        """Get pull back."""
+        if len(self._symmetry) > 0:
+            sub_element_value_shape = self.sub_elements[0].reference_value_shape
+            for e in self.sub_elements:
+                if e.reference_value_shape != sub_element_value_shape:
+                    raise ValueError("Sub-elements must all have the same value size")
+            symmetry = {}
+            n = 0
+            for i in np.ndindex(self._shape):
+                if i in self._symmetry and self._symmetry[i] in symmetry:
+                    symmetry[i] = symmetry[self._symmetry[i]]
+                else:
+                    symmetry[i] = n
+                    n += 1
+            return SymmetricPullback(self, symmetry)
+
+        return self._sub_element.pullback
+
+    def __repr__(self):
+        """Doc."""
+        return self._repr
+
+    def variant(self):
+        """Return the variant used to initialise the element."""
+        return self._sub_element.variant()
+
+    def mapping(self):
+        """Doc."""
+        return self._mapping
+
+    def flattened_sub_element_mapping(self):
+        """Doc."""
+        return self._flattened_sub_element_mapping
+
+    def extract_subelement_component(self, i):
+        """Extract direct subelement index and subelement relative.
+
+        component index for a given component index.
+        """
+        if isinstance(i, int):
+            i = (i,)
+        self._check_component(i)
+
+        i = self.symmetry().get(i, i)
+        l = len(self._shape)  # noqa: E741
+        ii = i[:l]
+        jj = i[l:]
+        if ii not in self._sub_element_mapping:
+            raise ValueError(f"Illegal component index {i}.")
+        k = self._sub_element_mapping[ii]
+        return (k, jj)
+
+    def symmetry(self):
+        r"""Return the symmetry dict, which is a mapping :math:`c_0 \\to c_1`.
+
+        meaning that component :math:`c_0` is represented by component
+        :math:`c_1`.
+        A component is a tuple of one or more ints.
+        """
+        return self._symmetry
+
+    def reconstruct(self, **kwargs):
+        """Doc."""
+        sub_element = self._sub_element.reconstruct(**kwargs)
+        return TensorElement(sub_element, shape=self._shape, symmetry=self._symmetry)
+
+    def __str__(self):
+        """Format as string for pretty printing."""
+        if self._symmetry:
+            tmp = ", ".join("%s -> %s" % (a, b) for (a, b) in self._symmetry.items())
+            sym = " with symmetries (%s)" % tmp
+        else:
+            sym = ""
+        return ("<tensor element with shape %s of %s%s>" %
+                (self.reference_value_shape, self._sub_element, sym))
+
+    def shortstr(self):
+        """Format as string for pretty printing."""
+        if self._symmetry:
+            tmp = ", ".join("%s -> %s" % (a, b) for (a, b) in self._symmetry.items())
+            sym = " with symmetries (%s)" % tmp
+        else:
+            sym = ""
+        return "Tensor<%s x %s%s>" % (self.reference_value_shape,
+                                      self._sub_element.shortstr(), sym)
diff --git a/finat/ufl/restrictedelement.py b/finat/ufl/restrictedelement.py
new file mode 100644
index 000000000..797cca22c
--- /dev/null
+++ b/finat/ufl/restrictedelement.py
@@ -0,0 +1,113 @@
+"""This module defines the UFL finite element classes."""
+
+# Copyright (C) 2008-2016 Martin Sandve Alnæs
+#
+# This file was originally part of UFL (https://www.fenicsproject.org)
+#
+# SPDX-License-Identifier: LGPL-3.0-or-later
+#
+# Modified by Kristian B. Oelgaard
+# Modified by Marie E.
Rognes 2010, 2012 +# Modified by Massimiliano Leoni, 2016 +# Modified by Matthew Scroggs, 2023 + +from finat.ufl.finiteelementbase import FiniteElementBase +from ufl.sobolevspace import L2 + +valid_restriction_domains = ("interior", "facet", "face", "edge", "vertex", "reduced") + + +class RestrictedElement(FiniteElementBase): + """Represents the restriction of a finite element to a type of cell entity.""" + + def __init__(self, element, restriction_domain): + """Doc.""" + if not isinstance(element, FiniteElementBase): + raise ValueError("Expecting a finite element instance.") + if restriction_domain not in valid_restriction_domains: + raise ValueError(f"Expecting one of the strings: {valid_restriction_domains}") + + FiniteElementBase.__init__(self, "RestrictedElement", element.cell, + element.degree(), + element.quadrature_scheme(), + element.reference_value_shape) + + self._element = element + + self._restriction_domain = restriction_domain + + def __repr__(self): + """Doc.""" + return f"RestrictedElement({repr(self._element)}, {repr(self._restriction_domain)})" + + @property + def sobolev_space(self): + """Doc.""" + if self._restriction_domain == "interior": + return L2 + else: + return self._element.sobolev_space + + def is_cellwise_constant(self): + """Return whether the basis functions of this element is spatially constant over each cell.""" + return self._element.is_cellwise_constant() + + def _is_linear(self): + """Doc.""" + return self._element._is_linear() + + def sub_element(self): + """Return the element which is restricted.""" + return self._element + + def mapping(self): + """Doc.""" + return self._element.mapping() + + def restriction_domain(self): + """Return the domain onto which the element is restricted.""" + return self._restriction_domain + + def reconstruct(self, **kwargs): + """Doc.""" + element = self._element.reconstruct(**kwargs) + return RestrictedElement(element, self._restriction_domain) + + def __str__(self): + """Format as string for pretty printing.""" + return "<%s>|_{%s}" % (self._element, self._restriction_domain) + + def shortstr(self): + """Format as string for pretty printing.""" + return "<%s>|_{%s}" % (self._element.shortstr(), + self._restriction_domain) + + def symmetry(self): + r"""Return the symmetry dict, which is a mapping :math:`c_0 \\to c_1`. + + meaning that component :math:`c_0` is represented by component + :math:`c_1`. A component is a tuple of one or more ints. + """ + return self._element.symmetry() + + @property + def num_sub_elements(self): + """Return number of sub elements.""" + return self._element.num_sub_elements + + @property + def sub_elements(self): + """Return list of sub elements.""" + return self._element.sub_elements + + def num_restricted_sub_elements(self): + """Return number of restricted sub elements.""" + return 1 + + def restricted_sub_elements(self): + """Return list of restricted sub elements.""" + return (self._element,) + + def variant(self): + """Doc.""" + return self._element.variant() diff --git a/finat/ufl/tensorproductelement.py b/finat/ufl/tensorproductelement.py new file mode 100644 index 000000000..760626f87 --- /dev/null +++ b/finat/ufl/tensorproductelement.py @@ -0,0 +1,138 @@ +"""This module defines the UFL finite element classes.""" + +# Copyright (C) 2008-2016 Martin Sandve Alnæs +# +# This file was originally part of UFL (https://www.fenicsproject.org) +# +# SPDX-License-Identifier: LGPL-3.0-or-later +# +# Modified by Kristian B. Oelgaard +# Modified by Marie E. 
Rognes 2010, 2012 +# Modified by Massimiliano Leoni, 2016 +# Modified by Matthew Scroggs, 2023 + +from itertools import chain + +from ufl.cell import TensorProductCell, as_cell +from finat.ufl.finiteelementbase import FiniteElementBase +from ufl.sobolevspace import DirectionalSobolevSpace + + +class TensorProductElement(FiniteElementBase): + r"""The tensor product of :math:`d` element spaces. + + .. math:: V = V_1 \otimes V_2 \otimes ... \otimes V_d + + Given bases :math:`\{\phi_{j_i}\}` of the spaces :math:`V_i` for :math:`i = 1, ...., d`, + :math:`\{ \phi_{j_1} \otimes \phi_{j_2} \otimes \cdots \otimes \phi_{j_d} + \}` forms a basis for :math:`V`. + """ + __slots__ = ("_sub_elements", "_cell") + + def __init__(self, *elements, **kwargs): + """Create TensorProductElement from a given list of elements.""" + if not elements: + raise ValueError("Cannot create TensorProductElement from empty list.") + + keywords = list(kwargs.keys()) + if keywords and keywords != ["cell"]: + raise ValueError("TensorProductElement got an unexpected keyword argument '%s'" % keywords[0]) + cell = kwargs.get("cell") + + family = "TensorProductElement" + + if cell is None: + # Define cell as the product of each elements cell + cell = TensorProductCell(*[e.cell for e in elements]) + else: + cell = as_cell(cell) + + # Define polynomial degree as a tuple of sub-degrees + degree = tuple(e.degree() for e in elements) + + # No quadrature scheme defined + quad_scheme = None + + # match FIAT implementation + reference_value_shape = tuple(chain(*[e.reference_value_shape for e in elements])) + if len(reference_value_shape) > 1: + raise ValueError("Product of vector-valued elements not supported") + + FiniteElementBase.__init__(self, family, cell, degree, + quad_scheme, reference_value_shape) + self._sub_elements = elements + self._cell = cell + + def __repr__(self): + """Doc.""" + return "TensorProductElement(" + ", ".join(repr(e) for e in self._sub_elements) + f", cell={repr(self._cell)})" + + def mapping(self): + """Doc.""" + if all(e.mapping() == "identity" for e in self._sub_elements): + return "identity" + elif all(e.mapping() == "L2 Piola" for e in self._sub_elements): + return "L2 Piola" + else: + return "undefined" + + @property + def sobolev_space(self): + """Return the underlying Sobolev space of the TensorProductElement.""" + elements = self._sub_elements + if all(e.sobolev_space == elements[0].sobolev_space + for e in elements): + return elements[0].sobolev_space + else: + # Generate a DirectionalSobolevSpace which contains + # continuity information parametrized by spatial index + orders = [] + for e in elements: + # TODO: is this the right value for e_dim + e_dim = e.cell.topological_dimension() + e_order = (e.sobolev_space._order,) * e_dim + orders.extend(e_order) + return DirectionalSobolevSpace(orders) + + @property + def num_sub_elements(self): + """Return number of subelements.""" + return len(self._sub_elements) + + @property + def sub_elements(self): + """Return subelements (factors).""" + return self._sub_elements + + def reconstruct(self, **kwargs): + """Doc.""" + cell = kwargs.pop("cell", self.cell) + return TensorProductElement(*[e.reconstruct(**kwargs) for e in self.sub_elements], cell=cell) + + def variant(self): + """Doc.""" + try: + variant, = {e.variant() for e in self.sub_elements} + return variant + except ValueError: + return None + + def __str__(self): + """Pretty-print.""" + return "TensorProductElement(%s, cell=%s)" \ + % (', '.join([str(e) for e in self._sub_elements]), str(self._cell)) 
+ + def shortstr(self): + """Short pretty-print.""" + return "TensorProductElement(%s, cell=%s)" \ + % (', '.join([e.shortstr() for e in self._sub_elements]), str(self._cell)) + + @property + def embedded_superdegree(self): + """Doc.""" + return sum(self.degree()) + + @property + def embedded_subdegree(self): + """Doc.""" + return min(self.degree()) diff --git a/gem/__init__.py b/gem/__init__.py new file mode 100644 index 000000000..f1e772037 --- /dev/null +++ b/gem/__init__.py @@ -0,0 +1,2 @@ +from gem.gem import * # noqa +from gem.optimise import select_expression # noqa diff --git a/gem/coffee.py b/gem/coffee.py new file mode 100644 index 000000000..f766a4890 --- /dev/null +++ b/gem/coffee.py @@ -0,0 +1,192 @@ +"""This module contains an implementation of the COFFEE optimisation +algorithm operating on a GEM representation. + +This file is NOT for code generation as a COFFEE AST. +""" + +from collections import OrderedDict +import itertools +import logging + +import numpy + +from gem.gem import IndexSum, one +from gem.optimise import make_sum, make_product +from gem.refactorise import Monomial +from gem.utils import groupby + + +__all__ = ['optimise_monomial_sum'] + + +def monomial_sum_to_expression(monomial_sum): + """Convert a monomial sum to a GEM expression. + + :arg monomial_sum: an iterable of :class:`Monomial`s + + :returns: GEM expression + """ + indexsums = [] # The result is summation of indexsums + # Group monomials according to their sum indices + groups = groupby(monomial_sum, key=lambda m: frozenset(m.sum_indices)) + # Create IndexSum's from each monomial group + for _, monomials in groups: + sum_indices = monomials[0].sum_indices + products = [make_product(monomial.atomics + (monomial.rest,)) for monomial in monomials] + indexsums.append(IndexSum(make_sum(products), sum_indices)) + return make_sum(indexsums) + + +def index_extent(factor, linear_indices): + """Compute the product of the extents of linear indices of a GEM expression + + :arg factor: GEM expression + :arg linear_indices: set of linear indices + + :returns: product of extents of linear indices + """ + return numpy.prod([i.extent for i in factor.free_indices if i in linear_indices]) + + +def find_optimal_atomics(monomials, linear_indices): + """Find optimal atomic common subexpressions, which produce least number of + terms in the resultant IndexSum when factorised. 
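+
+    The search enumerates candidate subsets of atomics (with an iteration cap),
+    preferring solutions that use fewer atomics and, as a tie-break, atomics
+    with larger combined index extent.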
+ + :arg monomials: A list of :class:`Monomial`s, all of which should have + the same sum indices + :arg linear_indices: tuple of linear indices + + :returns: list of atomic GEM expressions + """ + atomics = tuple(OrderedDict.fromkeys(itertools.chain(*(monomial.atomics for monomial in monomials)))) + + def cost(solution): + extent = sum(map(lambda atomic: index_extent(atomic, linear_indices), solution)) + # Prefer shorter solutions, but larger extents + return (len(solution), -extent) + + optimal_solution = set(atomics) # pessimal but feasible solution + solution = set() + + max_it = 1 << 12 + it = iter(range(max_it)) + + def solve(idx): + while idx < len(monomials) and solution.intersection(monomials[idx].atomics): + idx += 1 + + if idx < len(monomials): + if len(solution) < len(optimal_solution): + for atomic in monomials[idx].atomics: + solution.add(atomic) + solve(idx + 1) + solution.remove(atomic) + else: + if cost(solution) < cost(optimal_solution): + optimal_solution.clear() + optimal_solution.update(solution) + next(it) + + try: + solve(0) + except StopIteration: + logger = logging.getLogger('tsfc') + logger.warning("Solution to ILP problem may not be optimal: search " + "interrupted after examining %d solutions.", max_it) + + return tuple(atomic for atomic in atomics if atomic in optimal_solution) + + +def factorise_atomics(monomials, optimal_atomics, linear_indices): + """Group and factorise monomials using a list of atomics as common + subexpressions. Create new monomials for each group and optimise them recursively. + + :arg monomials: an iterable of :class:`Monomial`s, all of which should have + the same sum indices + :arg optimal_atomics: list of tuples of atomics to be used as common subexpression + :arg linear_indices: tuple of linear indices + + :returns: an iterable of :class:`Monomials`s after factorisation + """ + if not optimal_atomics or len(monomials) <= 1: + return monomials + + # Group monomials with respect to each optimal atomic + def group_key(monomial): + for oa in optimal_atomics: + if oa in monomial.atomics: + return oa + assert False, "Expect at least one optimal atomic per monomial." + factor_group = groupby(monomials, key=group_key) + + # We should not drop monomials + assert sum(len(ms) for _, ms in factor_group) == len(monomials) + + sum_indices = next(iter(monomials)).sum_indices + new_monomials = [] + for oa, monomials in factor_group: + # Create new MonomialSum for the factorised out terms + sub_monomials = [] + for monomial in monomials: + atomics = list(monomial.atomics) + atomics.remove(oa) # remove common factor + sub_monomials.append(Monomial((), tuple(atomics), monomial.rest)) + # Continue to factorise the remaining expression + sub_monomials = optimise_monomials(sub_monomials, linear_indices) + if len(sub_monomials) == 1: + # Factorised part is a product, we add back the common atomics then + # add to new MonomialSum directly rather than forming a product node + # Retaining the monomial structure enables applying associativity + # when forming GEM nodes later. + sub_monomial, = sub_monomials + new_monomials.append( + Monomial(sum_indices, (oa,) + sub_monomial.atomics, sub_monomial.rest)) + else: + # Factorised part is a summation, we need to create a new GEM node + # and multiply with the common factor + node = monomial_sum_to_expression(sub_monomials) + # If the free indices of the new node intersect with linear indices, + # add to the new monomial as `atomic`, otherwise add as `rest`. 
+ # Note: we might want to continue to factorise with the new atomics + # by running optimise_monoials twice. + if set(linear_indices) & set(node.free_indices): + new_monomials.append(Monomial(sum_indices, (oa, node), one)) + else: + new_monomials.append(Monomial(sum_indices, (oa, ), node)) + return new_monomials + + +def optimise_monomial_sum(monomial_sum, linear_indices): + """Choose optimal common atomic subexpressions and factorise a + :class:`MonomialSum` object to create a GEM expression. + + :arg monomial_sum: a :class:`MonomialSum` object + :arg linear_indices: tuple of linear indices + + :returns: factorised GEM expression + """ + groups = groupby(monomial_sum, key=lambda m: frozenset(m.sum_indices)) + new_monomials = [] + for _, monomials in groups: + new_monomials.extend(optimise_monomials(monomials, linear_indices)) + return monomial_sum_to_expression(new_monomials) + + +def optimise_monomials(monomials, linear_indices): + """Choose optimal common atomic subexpressions and factorise an iterable + of monomials. + + :arg monomials: a list of :class:`Monomial`s, all of which should have + the same sum indices + :arg linear_indices: tuple of linear indices + + :returns: an iterable of factorised :class:`Monomials`s + """ + assert len(set(frozenset(m.sum_indices) for m in monomials)) <= 1, \ + "All monomials required to have same sum indices for factorisation" + + result = [m for m in monomials if not m.atomics] # skipped monomials + active_monomials = [m for m in monomials if m.atomics] + optimal_atomics = find_optimal_atomics(active_monomials, linear_indices) + result += factorise_atomics(active_monomials, optimal_atomics, linear_indices) + return result diff --git a/gem/flop_count.py b/gem/flop_count.py new file mode 100644 index 000000000..b9595e817 --- /dev/null +++ b/gem/flop_count.py @@ -0,0 +1,197 @@ +""" +This file contains all the necessary functions to accurately count the +total number of floating point operations for a given script. 
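+
+Typical usage is a single call to :func:`count_flops` on a scheduled Impero
+tree (illustrative sketch; ``impero_c`` is assumed to be an ``Impero_C``
+object produced by TSFC's scheduler)::
+
+    from gem.flop_count import count_flops
+
+    flops = count_flops(impero_c)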
+""" + +import gem.gem as gem +import gem.impero as imp +from functools import singledispatch +import numpy +import math + + +@singledispatch +def statement(tree, temporaries): + raise NotImplementedError + + +@statement.register(imp.Block) +def statement_block(tree, temporaries): + flops = sum(statement(child, temporaries) for child in tree.children) + return flops + + +@statement.register(imp.For) +def statement_for(tree, temporaries): + extent = tree.index.extent + assert extent is not None + child, = tree.children + flops = statement(child, temporaries) + return flops * extent + + +@statement.register(imp.Initialise) +def statement_initialise(tree, temporaries): + return 0 + + +@statement.register(imp.Accumulate) +def statement_accumulate(tree, temporaries): + flops = expression_flops(tree.indexsum.children[0], temporaries) + return flops + 1 + + +@statement.register(imp.Return) +def statement_return(tree, temporaries): + flops = expression_flops(tree.expression, temporaries) + return flops + 1 + + +@statement.register(imp.ReturnAccumulate) +def statement_returnaccumulate(tree, temporaries): + flops = expression_flops(tree.indexsum.children[0], temporaries) + return flops + 1 + + +@statement.register(imp.Evaluate) +def statement_evaluate(tree, temporaries): + flops = expression_flops(tree.expression, temporaries, top=True) + return flops + + +@singledispatch +def flops(expr, temporaries): + raise NotImplementedError(f"Don't know how to count flops of {type(expr)}") + + +@flops.register(gem.Failure) +def flops_failure(expr, temporaries): + raise ValueError("Not expecting a Failure node") + + +@flops.register(gem.Variable) +@flops.register(gem.Identity) +@flops.register(gem.Delta) +@flops.register(gem.Zero) +@flops.register(gem.Literal) +@flops.register(gem.Index) +@flops.register(gem.VariableIndex) +def flops_zero(expr, temporaries): + # Initial set up of these Gem nodes are of 0 floating point operations. + return 0 + + +@flops.register(gem.LogicalNot) +@flops.register(gem.LogicalAnd) +@flops.register(gem.LogicalOr) +@flops.register(gem.ListTensor) +def flops_zeroplus(expr, temporaries): + # These nodes contribute 0 floating point operations, but their children may not. + return 0 + sum(expression_flops(child, temporaries) + for child in expr.children) + + +@flops.register(gem.Product) +def flops_product(expr, temporaries): + # Multiplication by -1 is not a flop. 
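+    # e.g. Product(Literal(-1), x) is counted like x alone: the negation is
+    # treated as free.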
+ a, b = expr.children + if isinstance(a, gem.Literal) and a.value == -1: + return expression_flops(b, temporaries) + elif isinstance(b, gem.Literal) and b.value == -1: + return expression_flops(a, temporaries) + else: + return 1 + sum(expression_flops(child, temporaries) + for child in expr.children) + + +@flops.register(gem.Sum) +@flops.register(gem.Division) +@flops.register(gem.Comparison) +@flops.register(gem.MathFunction) +@flops.register(gem.MinValue) +@flops.register(gem.MaxValue) +def flops_oneplus(expr, temporaries): + return 1 + sum(expression_flops(child, temporaries) + for child in expr.children) + + +@flops.register(gem.Power) +def flops_power(expr, temporaries): + base, exponent = expr.children + base_flops = expression_flops(base, temporaries) + if isinstance(exponent, gem.Literal): + exponent = exponent.value + if exponent > 0 and exponent == math.floor(exponent): + return base_flops + int(math.ceil(math.log2(exponent))) + else: + return base_flops + 5 # heuristic + else: + return base_flops + 5 # heuristic + + +@flops.register(gem.Conditional) +def flops_conditional(expr, temporaries): + condition, then, else_ = (expression_flops(child, temporaries) + for child in expr.children) + return condition + max(then, else_) + + +@flops.register(gem.Indexed) +@flops.register(gem.FlexiblyIndexed) +def flops_indexed(expr, temporaries): + aggregate = sum(expression_flops(child, temporaries) + for child in expr.children) + # Average flops per entry + return aggregate / numpy.prod(expr.children[0].shape, dtype=int) + + +@flops.register(gem.IndexSum) +def flops_indexsum(expr, temporaries): + raise ValueError("Not expecting IndexSum") + + +@flops.register(gem.Inverse) +def flops_inverse(expr, temporaries): + n, _ = expr.shape + # 2n^3 + child flop count + return 2*n**3 + sum(expression_flops(child, temporaries) + for child in expr.children) + + +@flops.register(gem.Solve) +def flops_solve(expr, temporaries): + n, m = expr.shape + # 2mn + inversion cost of A + children flop count + return 2*n*m + 2*n**3 + sum(expression_flops(child, temporaries) + for child in expr.children) + + +@flops.register(gem.ComponentTensor) +def flops_componenttensor(expr, temporaries): + raise ValueError("Not expecting ComponentTensor") + + +def expression_flops(expression, temporaries, top=False): + """An approximation to flops required for each expression. + + :arg expression: GEM expression. + :arg temporaries: Expressions that are assigned to temporaries + :arg top: are we at the root? + :returns: flop count for the expression + """ + if not top and expression in temporaries: + return 0 + else: + return flops(expression, temporaries) + + +def count_flops(impero_c): + """An approximation to flops required for a scheduled impero_c tree. + + :arg impero_c: a :class:`~.Impero_C` object. + :returns: approximate flop count for the tree. + """ + try: + return statement(impero_c.tree, set(impero_c.temporaries)) + except (ValueError, NotImplementedError): + return 0 diff --git a/gem/gem.py b/gem/gem.py new file mode 100644 index 000000000..9f00d5535 --- /dev/null +++ b/gem/gem.py @@ -0,0 +1,1292 @@ +"""GEM is the intermediate language of TSFC for describing +tensor-valued mathematical expressions and tensor operations. +It is similar to Einstein's notation. + +Its design was heavily inspired by UFL, with some major differences: + - GEM has got nothing FEM-specific. + - In UFL free indices are just unrolled shape, thus UFL is very + restrictive about operations on expressions with different sets of + free indices. 
GEM is much more relaxed about free indices. + +Similarly to UFL, all GEM nodes have 'shape' and 'free_indices' +attributes / properties. Unlike UFL, however, index extents live on +the Index objects in GEM, not on all the nodes that have those free +indices. +""" + +from abc import ABCMeta +from itertools import chain +from operator import attrgetter +from numbers import Integral, Number + +import numpy +from numpy import asarray + +from gem.node import Node as NodeBase, traversal + +from FIAT.orientation_utils import Orientation as FIATOrientation + + +__all__ = ['Node', 'Identity', 'Literal', 'Zero', 'Failure', + 'Variable', 'Sum', 'Product', 'Division', 'FloorDiv', 'Remainder', 'Power', + 'MathFunction', 'MinValue', 'MaxValue', 'Comparison', + 'LogicalNot', 'LogicalAnd', 'LogicalOr', 'Conditional', + 'Index', 'VariableIndex', 'Indexed', 'ComponentTensor', + 'IndexSum', 'ListTensor', 'Concatenate', 'Delta', 'OrientationVariableIndex', + 'index_sum', 'partial_indexed', 'reshape', 'view', + 'indices', 'as_gem', 'FlexiblyIndexed', + 'Inverse', 'Solve', 'extract_type', 'uint_type'] + + +uint_type = numpy.dtype(numpy.uintc) + + +class NodeMeta(type): + """Metaclass of GEM nodes. + + When a GEM node is constructed, this metaclass automatically + collects its free indices if 'free_indices' has not been set yet. + """ + + def __call__(self, *args, **kwargs): + # Create and initialise object + obj = super(NodeMeta, self).__call__(*args, **kwargs) + + # Set free_indices if not set already + if not hasattr(obj, 'free_indices'): + obj.free_indices = unique(chain(*[c.free_indices + for c in obj.children])) + # Set dtype if not set already. + if not hasattr(obj, 'dtype'): + obj.dtype = obj.inherit_dtype_from_children(obj.children) + + return obj + + +class Node(NodeBase, metaclass=NodeMeta): + """Abstract GEM node class.""" + + __slots__ = ('free_indices', 'dtype') + + def is_equal(self, other): + """Common subexpression eliminating equality predicate. + + When two (sub)expressions are equal, the children of one + object are reassigned to the children of the other, so some + duplicated subexpressions are eliminated. 
+ """ + result = NodeBase.is_equal(self, other) + if result: + self.children = other.children + return result + + def __getitem__(self, indices): + try: + indices = tuple(indices) + except TypeError: + indices = (indices, ) + return Indexed(self, indices) + + def __add__(self, other): + return componentwise(Sum, self, as_gem(other)) + + def __radd__(self, other): + return as_gem(other).__add__(self) + + def __sub__(self, other): + return componentwise( + Sum, self, + componentwise(Product, Literal(-1), as_gem(other))) + + def __rsub__(self, other): + return as_gem(other).__sub__(self) + + def __mul__(self, other): + return componentwise(Product, self, as_gem(other)) + + def __rmul__(self, other): + return as_gem(other).__mul__(self) + + def __matmul__(self, other): + other = as_gem(other) + if not self.shape and not other.shape: + return Product(self, other) + elif not (self.shape and other.shape): + raise ValueError("Both objects must have shape for matmul") + elif self.shape[-1] != other.shape[0]: + raise ValueError(f"Mismatching shapes {self.shape} and {other.shape} in matmul") + *i, k = indices(len(self.shape)) + _, *j = indices(len(other.shape)) + expr = Product(Indexed(self, tuple(i) + (k, )), + Indexed(other, (k, ) + tuple(j))) + return ComponentTensor(IndexSum(expr, (k, )), tuple(i) + tuple(j)) + + def __rmatmul__(self, other): + return as_gem(other).__matmul__(self) + + @property + def T(self): + i = indices(len(self.shape)) + return ComponentTensor(Indexed(self, i), tuple(reversed(i))) + + def __truediv__(self, other): + other = as_gem(other) + if other.shape: + raise ValueError("Denominator must be scalar") + return componentwise(Division, self, other) + + def __rtruediv__(self, other): + return as_gem(other).__truediv__(self) + + def __floordiv__(self, other): + other = as_gem_uint(other) + if other.shape: + raise ValueError("Denominator must be scalar") + return componentwise(FloorDiv, self, other) + + def __rfloordiv__(self, other): + return as_gem_uint(other).__floordiv__(self) + + def __mod__(self, other): + other = as_gem_uint(other) + if other.shape: + raise ValueError("Denominator must be scalar") + return componentwise(Remainder, self, other) + + def __rmod__(self, other): + return as_gem_uint(other).__mod__(self) + + @staticmethod + def inherit_dtype_from_children(children): + if any(c.dtype is None for c in children): + # Set dtype = None will let _assign_dtype() + # assign the default dtype for this node later. + return + else: + return numpy.result_type(*(c.dtype for c in children)) + + +class Terminal(Node): + """Abstract class for terminal GEM nodes.""" + + __slots__ = ('_dtype',) + + children = () + + is_equal = NodeBase.is_equal + + @property + def dtype(self): + """Data type of the node. + + We only need to set dtype (or _dtype) on terminal nodes, and + other nodes inherit dtype from their children. + + Currently dtype is significant only for nodes under index DAGs + (DAGs underneath `VariableIndex`s representing indices), and + `VariableIndex` checks if the dtype of the node that it wraps is + of uint_type. _assign_dtype() will then assign uint_type to those nodes. + + dtype can be `None` otherwise, and _assign_dtype() will assign + the default dtype to those nodes. 
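+
+        For example (illustrative)::
+
+            n = Variable("n", (), dtype=uint_type)
+            VariableIndex(n)   # accepted, since n.dtype is uint_type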
+ + """ + if hasattr(self, '_dtype'): + return self._dtype + else: + raise AttributeError(f"Must set _dtype on terminal node, {type(self)}") + + +class Scalar(Node): + """Abstract class for scalar-valued GEM nodes.""" + + __slots__ = () + + shape = () + + +class Failure(Terminal): + """Abstract class for failure GEM nodes.""" + + __slots__ = ('shape', 'exception') + __front__ = ('shape', 'exception') + + def __init__(self, shape, exception): + self.shape = shape + self.exception = exception + self._dtype = None + + +class Constant(Terminal): + """Abstract base class for constant types. + + Convention: + - array: numpy array of values + - value: float or complex value (scalars only) + """ + pass + + +class Zero(Constant): + """Symbolic zero tensor""" + + __slots__ = ('shape',) + __front__ = ('shape',) + __back__ = ('dtype',) + + def __init__(self, shape=(), dtype=None): + self.shape = shape + self._dtype = dtype + + @property + def value(self): + assert not self.shape + return numpy.array(0, dtype=self.dtype or float).item() + + +class Identity(Constant): + """Identity matrix""" + + __slots__ = ('dim',) + __front__ = ('dim',) + __back__ = ('dtype',) + + def __init__(self, dim, dtype=None): + self.dim = dim + self._dtype = dtype + + @property + def shape(self): + return (self.dim, self.dim) + + @property + def array(self): + return numpy.eye(self.dim, dtype=self.dtype) + + +class Literal(Constant): + """Tensor-valued constant""" + + __slots__ = ('array',) + __front__ = ('array',) + __back__ = ('dtype',) + + def __new__(cls, array, dtype=None): + array = asarray(array) + return super(Literal, cls).__new__(cls) + + def __init__(self, array, dtype=None): + array = asarray(array) + if dtype is None: + # Assume float or complex. + try: + self.array = array.astype(float, casting="safe") + except TypeError: + self.array = array.astype(complex) + else: + # Can be int, etc. 
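+            # e.g. as_gem_uint() below builds Literal(n, dtype=uint_type) so
+            # that index arithmetic (FloorDiv, Remainder) stays in unsigned
+            # integers.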
+ self.array = array.astype(dtype) + self._dtype = self.array.dtype + + def is_equal(self, other): + if type(self) is not type(other): + return False + if self.shape != other.shape: + return False + return tuple(self.array.flat) == tuple(other.array.flat) + + def get_hash(self): + return hash((type(self), self.shape, tuple(self.array.flat))) + + @property + def value(self): + assert self.shape == () + return self.array.dtype.type(self.array) + + @property + def shape(self): + return self.array.shape + + +class Variable(Terminal): + """Symbolic variable tensor""" + + __slots__ = ('name', 'shape') + __front__ = ('name', 'shape') + __back__ = ('dtype',) + + def __init__(self, name, shape, dtype=None): + self.name = name + self.shape = shape + self._dtype = dtype + + +class Sum(Scalar): + __slots__ = ('children',) + + def __new__(cls, a, b): + assert not a.shape + assert not b.shape + + # Constant folding + if isinstance(a, Zero): + return b + elif isinstance(b, Zero): + return a + + if isinstance(a, Constant) and isinstance(b, Constant): + return Literal(a.value + b.value, dtype=Node.inherit_dtype_from_children([a, b])) + + self = super(Sum, cls).__new__(cls) + self.children = a, b + return self + + +class Product(Scalar): + __slots__ = ('children',) + + def __new__(cls, a, b): + assert not a.shape + assert not b.shape + + # Constant folding + if isinstance(a, Zero) or isinstance(b, Zero): + return Zero() + + if a == one: + return b + if b == one: + return a + + if isinstance(a, Constant) and isinstance(b, Constant): + return Literal(a.value * b.value, dtype=Node.inherit_dtype_from_children([a, b])) + + self = super(Product, cls).__new__(cls) + self.children = a, b + return self + + +class Division(Scalar): + __slots__ = ('children',) + + def __new__(cls, a, b): + assert not a.shape + assert not b.shape + + # Constant folding + if isinstance(b, Zero): + raise ValueError("division by zero") + if isinstance(a, Zero): + return Zero() + + if b == one: + return a + + if isinstance(a, Constant) and isinstance(b, Constant): + return Literal(a.value / b.value, dtype=Node.inherit_dtype_from_children([a, b])) + + self = super(Division, cls).__new__(cls) + self.children = a, b + return self + + +class FloorDiv(Scalar): + __slots__ = ('children',) + + def __new__(cls, a, b): + assert not a.shape + assert not b.shape + dtype = Node.inherit_dtype_from_children([a, b]) + if dtype != uint_type: + raise ValueError(f"dtype ({dtype}) != unit_type ({uint_type})") + # Constant folding + if isinstance(b, Zero): + raise ValueError("division by zero") + if isinstance(a, Zero): + return Zero(dtype=dtype) + if isinstance(b, Constant) and b.value == 1: + return a + if isinstance(a, Constant) and isinstance(b, Constant): + return Literal(a.value // b.value, dtype=dtype) + self = super(FloorDiv, cls).__new__(cls) + self.children = a, b + return self + + +class Remainder(Scalar): + __slots__ = ('children',) + + def __new__(cls, a, b): + assert not a.shape + assert not b.shape + dtype = Node.inherit_dtype_from_children([a, b]) + if dtype != uint_type: + raise ValueError(f"dtype ({dtype}) != uint_type ({uint_type})") + # Constant folding + if isinstance(b, Zero): + raise ValueError("division by zero") + if isinstance(a, Zero): + return Zero(dtype=dtype) + if isinstance(b, Constant) and b.value == 1: + return Zero(dtype=dtype) + if isinstance(a, Constant) and isinstance(b, Constant): + return Literal(a.value % b.value, dtype=dtype) + self = super(Remainder, cls).__new__(cls) + self.children = a, b + return self + + +class 
Power(Scalar): + __slots__ = ('children',) + + def __new__(cls, base, exponent): + assert not base.shape + assert not exponent.shape + dtype = Node.inherit_dtype_from_children([base, exponent]) + + # Constant folding + if isinstance(base, Zero): + if isinstance(exponent, Zero): + raise ValueError("cannot solve 0^0") + return Zero(dtype=dtype) + elif isinstance(exponent, Zero): + return Literal(1, dtype=dtype) + elif isinstance(base, Constant) and isinstance(exponent, Constant): + return Literal(base.value ** exponent.value, dtype=dtype) + + self = super(Power, cls).__new__(cls) + self.children = base, exponent + return self + + +class MathFunction(Scalar): + __slots__ = ('name', 'children') + __front__ = ('name',) + + def __new__(cls, name, *args): + assert isinstance(name, str) + assert all(arg.shape == () for arg in args) + + if name in {'conj', 'real', 'imag'}: + arg, = args + if isinstance(arg, Zero): + return arg + + self = super(MathFunction, cls).__new__(cls) + self.name = name + self.children = args + return self + + +class MinValue(Scalar): + __slots__ = ('children',) + + def __init__(self, a, b): + assert not a.shape + assert not b.shape + + self.children = a, b + + +class MaxValue(Scalar): + __slots__ = ('children',) + + def __init__(self, a, b): + assert not a.shape + assert not b.shape + + self.children = a, b + + +class Comparison(Scalar): + __slots__ = ('operator', 'children') + __front__ = ('operator',) + + def __init__(self, op, a, b): + assert not a.shape + assert not b.shape + + if op not in [">", ">=", "==", "!=", "<", "<="]: + raise ValueError("invalid operator") + + self.operator = op + self.children = a, b + self.dtype = None # Do not inherit dtype from children. + + +class LogicalNot(Scalar): + __slots__ = ('children',) + + def __init__(self, expression): + assert not expression.shape + + self.children = expression, + + +class LogicalAnd(Scalar): + __slots__ = ('children',) + + def __init__(self, a, b): + assert not a.shape + assert not b.shape + + self.children = a, b + + +class LogicalOr(Scalar): + __slots__ = ('children',) + + def __init__(self, a, b): + assert not a.shape + assert not b.shape + + self.children = a, b + + +class Conditional(Node): + __slots__ = ('children', 'shape') + + def __new__(cls, condition, then, else_): + assert not condition.shape + assert then.shape == else_.shape == () + + # If both branches are the same, just return one of them. In + # particular, this will help constant-fold zeros. 
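+        # e.g. Conditional(c, Zero(), Zero()) simply returns Zero().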
+ if then == else_: + return then + + self = super(Conditional, cls).__new__(cls) + self.children = condition, then, else_ + self.shape = then.shape + self.dtype = Node.inherit_dtype_from_children([then, else_]) + return self + + +class IndexBase(metaclass=ABCMeta): + """Abstract base class for indices.""" + pass + + +IndexBase.register(int) + + +class Index(IndexBase): + """Free index""" + + # Not true object count, just for naming purposes + _count = 0 + + __slots__ = ('name', 'extent', 'count') + + def __init__(self, name=None, extent=None): + self.name = name + Index._count += 1 + self.count = Index._count + self.extent = extent + + def set_extent(self, value): + # Set extent, check for consistency + if self.extent is None: + self.extent = value + elif self.extent != value: + raise ValueError("Inconsistent index extents!") + + def __str__(self): + if self.name is None: + return "i_%d" % self.count + return self.name + + def __repr__(self): + if self.name is None: + return "Index(%r)" % self.count + return "Index(%r)" % self.name + + def __lt__(self, other): + # Allow sorting of free indices in Python 3 + return id(self) < id(other) + + def __getstate__(self): + return self.name, self.extent, self.count + + def __setstate__(self, state): + self.name, self.extent, self.count = state + + +class VariableIndex(IndexBase): + """An index that is constant during a single execution of the + kernel, but whose value is not known at compile time.""" + + __slots__ = ('expression',) + + def __init__(self, expression): + assert isinstance(expression, Node) + assert not expression.shape + if expression.dtype != uint_type: + raise ValueError(f"expression.dtype ({expression.dtype}) != uint_type ({uint_type})") + self.expression = expression + + def __eq__(self, other): + if self is other: + return True + if type(self) is not type(other): + return False + return self.expression == other.expression + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash((type(self), self.expression)) + + def __str__(self): + return str(self.expression) + + def __repr__(self): + return "%r(%r)" % (type(self), self.expression,) + + def __reduce__(self): + return type(self), (self.expression,) + + +class Indexed(Scalar): + __slots__ = ('children', 'multiindex', 'indirect_children') + __back__ = ('multiindex',) + + def __new__(cls, aggregate, multiindex): + # Accept numpy or any integer, but cast to int. 
+ multiindex = tuple(int(i) if isinstance(i, Integral) else i + for i in multiindex) + + # Set index extents from shape + assert len(aggregate.shape) == len(multiindex) + for index, extent in zip(multiindex, aggregate.shape): + assert isinstance(index, IndexBase) + if isinstance(index, Index): + index.set_extent(extent) + elif isinstance(index, int) and not (0 <= index < extent): + raise IndexError("Invalid literal index") + + # Empty multiindex + if not multiindex: + return aggregate + + # Zero folding + if isinstance(aggregate, Zero): + return Zero(dtype=aggregate.dtype) + + # All indices fixed + if all(isinstance(i, int) for i in multiindex): + if isinstance(aggregate, Constant): + return Literal(aggregate.array[multiindex], dtype=aggregate.dtype) + elif isinstance(aggregate, ListTensor): + return aggregate.array[multiindex] + + self = super(Indexed, cls).__new__(cls) + self.children = (aggregate,) + self.multiindex = multiindex + self.indirect_children = tuple(i.expression for i in self.multiindex if isinstance(i, VariableIndex)) + + new_indices = [] + for i in multiindex: + if isinstance(i, Index): + new_indices.append(i) + elif isinstance(i, VariableIndex): + new_indices.extend(i.expression.free_indices) + self.free_indices = unique(aggregate.free_indices + tuple(new_indices)) + + return self + + def index_ordering(self): + """Running indices in the order of indexing in this node.""" + free_indices = [] + for i in self.multiindex: + if isinstance(i, Index): + free_indices.append(i) + elif isinstance(i, VariableIndex): + free_indices.extend(i.expression.free_indices) + return tuple(free_indices) + + +class FlexiblyIndexed(Scalar): + """Flexible indexing of :py:class:`Variable`s to implement views and + reshapes (splitting dimensions only).""" + + __slots__ = ('children', 'dim2idxs', 'indirect_children') + __back__ = ('dim2idxs',) + + def __init__(self, variable, dim2idxs): + """Construct a flexibly indexed node. + + Parameters + ---------- + variable : Node + `Node` that has a shape. + dim2idxs : tuple + Tuple of (offset, ((index, stride), (...), ...)) mapping indices, + where offset is {Node, int}, index is {Index, VariableIndex, int}, and + stride is {Node, int}. + + For example, if ``variable`` is rank two, and ``dim2idxs`` is + + ((1, ((i, 12), (j, 4), (k, 1))), (0, ())) + + then this corresponds to the indexing: + + variable[1 + i*12 + j*4 + k][0] + + """ + assert variable.shape + assert len(variable.shape) == len(dim2idxs) + dim2idxs_ = [] + free_indices = [] + for dim, (offset, idxs) in zip(variable.shape, dim2idxs): + offset_ = offset + idxs_ = [] + last = 0 + if isinstance(offset, Node): + free_indices.extend(offset.free_indices) + for index, stride in idxs: + if isinstance(index, Index): + assert index.extent is not None + free_indices.append(index) + idxs_.append((index, stride)) + last += (index.extent - 1) * stride + elif isinstance(index, VariableIndex): + base_indices = index.expression.free_indices + assert all(base_index.extent is not None for base_index in base_indices) + free_indices.extend(base_indices) + idxs_.append((index, stride)) + # last += (unknown_extent - 1) * stride + elif isinstance(index, int): + # TODO: Attach dtype to each Node. + # Here, we should simply be able to do: + # >>> offset_ += index * stride + # but "+" and "*" are not currently correctly overloaded + # for indices (integers); they assume floats. 
+ if not isinstance(offset, Integral): + raise NotImplementedError(f"Found non-Integral offset : {offset}") + if isinstance(stride, Constant): + offset_ += index * stride.value + else: + offset_ += index * stride + else: + raise ValueError("Unexpected index type for flexible indexing") + if isinstance(stride, Node): + free_indices.extend(stride.free_indices) + if dim is not None and isinstance(offset_ + last, Integral) and offset_ + last >= dim: + raise ValueError("Offset {0} and indices {1} exceed dimension {2}".format(offset, idxs, dim)) + dim2idxs_.append((offset_, tuple(idxs_))) + self.children = (variable,) + self.dim2idxs = tuple(dim2idxs_) + self.free_indices = unique(free_indices) + indirect_children = [] + for offset, idxs in self.dim2idxs: + if isinstance(offset, Node): + indirect_children.append(offset) + for idx, stride in idxs: + if isinstance(idx, VariableIndex): + indirect_children.append(idx.expression) + if isinstance(stride, Node): + indirect_children.append(stride) + self.indirect_children = tuple(indirect_children) + + def index_ordering(self): + """Running indices in the order of indexing in this node.""" + free_indices = [] + for offset, idxs in self.dim2idxs: + if isinstance(offset, Node): + free_indices.extend(offset.free_indices) + for index, stride in idxs: + if isinstance(index, Index): + free_indices.append(index) + elif isinstance(index, VariableIndex): + free_indices.extend(index.expression.free_indices) + if isinstance(stride, Node): + free_indices.extend(stride.free_indices) + return tuple(free_indices) + + +class ComponentTensor(Node): + __slots__ = ('children', 'multiindex', 'shape') + __back__ = ('multiindex',) + + def __new__(cls, expression, multiindex): + assert not expression.shape + + # Empty multiindex + if not multiindex: + return expression + + # Collect shape + shape = tuple(index.extent for index in multiindex) + assert all(s >= 0 for s in shape) + + # Zero folding + if isinstance(expression, Zero): + return Zero(shape, dtype=expression.dtype) + + self = super(ComponentTensor, cls).__new__(cls) + self.children = (expression,) + self.multiindex = multiindex + self.shape = shape + + # Collect free indices + assert set(multiindex) <= set(expression.free_indices) + self.free_indices = unique(set(expression.free_indices) - set(multiindex)) + + return self + + +class IndexSum(Scalar): + __slots__ = ('children', 'multiindex') + __back__ = ('multiindex',) + + def __new__(cls, summand, multiindex): + # Sum zeros + assert not summand.shape + if isinstance(summand, Zero): + return summand + + # Unroll singleton sums + unroll = tuple(index for index in multiindex if index.extent <= 1) + if unroll: + assert numpy.prod([index.extent for index in unroll]) == 1 + summand = Indexed(ComponentTensor(summand, unroll), + (0,) * len(unroll)) + multiindex = tuple(index for index in multiindex + if index not in unroll) + + # No indices case + multiindex = tuple(multiindex) + if not multiindex: + return summand + + self = super(IndexSum, cls).__new__(cls) + self.children = (summand,) + self.multiindex = multiindex + + # Collect shape and free indices + assert set(multiindex) <= set(summand.free_indices) + self.free_indices = unique(set(summand.free_indices) - set(multiindex)) + + return self + + +class ListTensor(Node): + __slots__ = ('array',) + + def __new__(cls, array): + array = asarray(array) + assert numpy.prod(array.shape) + dtype = Node.inherit_dtype_from_children(tuple(array.flat)) + + # Handle children with shape + child_shape = array.flat[0].shape + assert 
all(elem.shape == child_shape for elem in array.flat) + + if child_shape: + # Destroy structure + direct_array = numpy.empty(array.shape + child_shape, dtype=object) + for alpha in numpy.ndindex(array.shape): + for beta in numpy.ndindex(child_shape): + direct_array[alpha + beta] = Indexed(array[alpha], beta) + array = direct_array + + # Constant folding + if all(isinstance(elem, Constant) for elem in array.flat): + return Literal(numpy.vectorize(attrgetter('value'))(array), dtype=dtype) + + self = super(ListTensor, cls).__new__(cls) + self.array = array + return self + + @property + def children(self): + return tuple(self.array.flat) + + @property + def shape(self): + return self.array.shape + + def __reduce__(self): + return type(self), (self.array,) + + def reconstruct(self, *args): + return ListTensor(asarray(args).reshape(self.array.shape)) + + def __repr__(self): + return "ListTensor(%r)" % self.array.tolist() + + def is_equal(self, other): + """Common subexpression eliminating equality predicate.""" + if type(self) is not type(other): + return False + if (self.array == other.array).all(): + self.array = other.array + return True + return False + + def get_hash(self): + return hash((type(self), self.shape, self.children)) + + +class Concatenate(Node): + """Flattens and concatenates GEM expressions by shape. + + Similar to what UFL MixedElement does to value shape. For + example, if children have shapes (2, 2), (), and (3,) then the + concatenated expression has shape (8,). + """ + __slots__ = ('children',) + + def __new__(cls, *children): + dtype = Node.inherit_dtype_from_children(children) + if all(isinstance(child, Zero) for child in children): + size = int(sum(numpy.prod(child.shape, dtype=int) for child in children)) + return Zero((size,), dtype=dtype) + + self = super(Concatenate, cls).__new__(cls) + self.children = children + return self + + @property + def shape(self): + return (int(sum(numpy.prod(child.shape, dtype=int) for child in self.children)),) + + +class Delta(Scalar, Terminal): + __slots__ = ('i', 'j') + __front__ = ('i', 'j') + __back__ = ('dtype',) + + def __new__(cls, i, j, dtype=None): + assert isinstance(i, IndexBase) + assert isinstance(j, IndexBase) + + # \delta_{i,i} = 1 + if i == j: + return one + + # Fixed indices + if isinstance(i, int) and isinstance(j, int): + return Literal(int(i == j)) + + self = super(Delta, cls).__new__(cls) + self.i = i + self.j = j + # Set up free indices + free_indices = [] + for index in (i, j): + if isinstance(index, Index): + free_indices.append(index) + elif isinstance(index, VariableIndex): + raise NotImplementedError("Can not make Delta with VariableIndex") + self.free_indices = tuple(unique(free_indices)) + self._dtype = dtype + return self + + +class Inverse(Node): + """The inverse of a square matrix.""" + __slots__ = ('children', 'shape') + + def __new__(cls, tensor): + assert len(tensor.shape) == 2 + assert tensor.shape[0] == tensor.shape[1] + + # Invert 1x1 matrix + if tensor.shape == (1, 1): + multiindex = (Index(), Index()) + return ComponentTensor(Division(one, Indexed(tensor, multiindex)), multiindex) + + self = super(Inverse, cls).__new__(cls) + self.children = (tensor,) + self.shape = tensor.shape + + return self + + +class Solve(Node): + """Solution of a square matrix equation with (potentially) multiple right hand sides. + + Represents the X obtained by solving AX = B. 
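+
+    For example (illustrative)::
+
+        A = Variable("A", (3, 3))
+        B = Variable("B", (3, 2))
+        X = Solve(A, B)   # X.shape == (3, 2)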
+ """ + __slots__ = ('children', 'shape') + + def __init__(self, A, B): + # Shape requirements + assert B.shape + assert len(A.shape) == 2 + assert A.shape[0] == A.shape[1] + assert A.shape[0] == B.shape[0] + + self.children = (A, B) + self.shape = A.shape[1:] + B.shape[1:] + + +class OrientationVariableIndex(VariableIndex, FIATOrientation): + """VariableIndex representing a fiat orientation. + + Notes + ----- + In the current implementation, we need to extract + `VariableIndex.expression` as index arithmetic + is not supported (indices are not `Node`). + + """ + + def __floordiv__(self, other): + other = other.expression if isinstance(other, VariableIndex) else as_gem_uint(other) + return type(self)(FloorDiv(self.expression, other)) + + def __rfloordiv__(self, other): + other = other.expression if isinstance(other, VariableIndex) else as_gem_uint(other) + return type(self)(FloorDiv(other, self.expression)) + + def __mod__(self, other): + other = other.expression if isinstance(other, VariableIndex) else as_gem_uint(other) + return type(self)(Remainder(self.expression, other)) + + def __rmod__(self, other): + other = other.expression if isinstance(other, VariableIndex) else as_gem_uint(other) + return type(self)(Remainder(other, self.expression)) + + +def unique(indices): + """Sorts free indices and eliminates duplicates. + + :arg indices: iterable of indices + :returns: sorted tuple of unique free indices + """ + return tuple(sorted(set(indices), key=id)) + + +def index_sum(expression, indices): + """Eliminates indices from the free indices of an expression by + summing over them. Skips any index that is not a free index of + the expression.""" + multiindex = tuple(index for index in indices + if index in expression.free_indices) + return IndexSum(expression, multiindex) + + +def partial_indexed(tensor, indices): + """Generalised indexing into a tensor by eating shape off the front. + The number of indices may be less than or equal to the rank of the tensor, + so the result may have a non-empty shape. + + :arg tensor: tensor-valued GEM expression + :arg indices: indices, at most as many as the rank of the tensor + :returns: a potentially tensor-valued expression + """ + if len(indices) == 0: + return tensor + elif len(indices) < len(tensor.shape): + rank = len(tensor.shape) - len(indices) + shape_indices = tuple(Index() for i in range(rank)) + return ComponentTensor( + Indexed(tensor, indices + shape_indices), + shape_indices) + elif len(indices) == len(tensor.shape): + return Indexed(tensor, indices) + else: + raise ValueError("More indices than rank!") + + +def strides_of(shape): + """Calculate cumulative strides from per-dimension capacities. + + For example: + + [2, 3, 4] ==> [12, 4, 1] + + """ + temp = numpy.flipud(numpy.cumprod(numpy.flipud(list(shape)[1:]))) + return list(temp) + [1] + + +def decompose_variable_view(expression): + """Extract information from a shaped node. 
+ Decompose ComponentTensor + FlexiblyIndexed.""" + if (isinstance(expression, (Variable, Inverse, Solve))): + variable = expression + indexes = tuple(Index(extent=extent) for extent in expression.shape) + dim2idxs = tuple((0, ((index, 1),)) for index in indexes) + elif (isinstance(expression, ComponentTensor) and + not isinstance(expression.children[0], FlexiblyIndexed)): + variable = expression + indexes = expression.multiindex + dim2idxs = tuple((0, ((index, 1),)) for index in indexes) + elif isinstance(expression, ComponentTensor) and isinstance(expression.children[0], FlexiblyIndexed): + variable = expression.children[0].children[0] + indexes = expression.multiindex + dim2idxs = expression.children[0].dim2idxs + else: + raise ValueError("Cannot handle {} objects.".format(type(expression).__name__)) + + return variable, dim2idxs, indexes + + +def reshape(expression, *shapes): + """Reshape a variable (splitting indices only). + + :arg expression: view of a :py:class:`Variable` + :arg shapes: one shape tuple for each dimension of the variable. + """ + variable, dim2idxs, indexes = decompose_variable_view(expression) + assert len(indexes) == len(shapes) + shape_of = dict(zip(indexes, shapes)) + + dim2idxs_ = [] + indices = [[] for _ in range(len(indexes))] + for offset, idxs in dim2idxs: + idxs_ = [] + for idx in idxs: + index, stride = idx + assert isinstance(index, Index) + dim = index.extent + shape = shape_of[index] + if dim is not None and numpy.prod(shape) != dim: + raise ValueError("Shape {} does not match extent {}.".format(shape, dim)) + strides = strides_of(shape) + for extent, stride_ in zip(shape, strides): + index_ = Index(extent=extent) + idxs_.append((index_, stride_ * stride)) + indices[indexes.index(index)].append(index_) + dim2idxs_.append((offset, tuple(idxs_))) + + expr = FlexiblyIndexed(variable, tuple(dim2idxs_)) + return ComponentTensor(expr, tuple(chain.from_iterable(indices))) + + +def view(expression, *slices): + """View a part of a shaped object. + + :arg expression: a node that has a shape + :arg slices: one slice object for each dimension of the expression. + """ + variable, dim2idxs, indexes = decompose_variable_view(expression) + assert len(indexes) == len(slices) + slice_of = dict(zip(indexes, slices)) + + dim2idxs_ = [] + indices = [None] * len(slices) + for offset, idxs in dim2idxs: + offset_ = offset + idxs_ = [] + for idx in idxs: + index, stride = idx + assert isinstance(index, Index) + dim = index.extent + s = slice_of[index] + start = s.start or 0 + stop = s.stop or dim + if stop is None: + raise ValueError("Unknown extent!") + if dim is not None and stop > dim: + raise ValueError("Slice exceeds dimension extent!") + step = s.step or 1 + offset_ += start * stride + extent = 1 + (stop - start - 1) // step + index_ = Index(extent=extent) + indices[indexes.index(index)] = index_ + idxs_.append((index_, step * stride)) + dim2idxs_.append((offset_, tuple(idxs_))) + + expr = FlexiblyIndexed(variable, tuple(dim2idxs_)) + return ComponentTensor(expr, tuple(indices)) + + +# Static one object for quicker constant folding +one = Literal(1) + + +# Syntax sugar +def indices(n): + """Make some :class:`Index` objects. + + :arg n: The number of indices to make. + :returns: A tuple of `n` :class:`Index` objects. + """ + return tuple(Index() for _ in range(n)) + + +def componentwise(op, *exprs): + """Apply gem op to exprs component-wise and wrap up in a ComponentTensor. + + :arg op: function that returns a gem Node. + :arg exprs: expressions to apply op to. 
+ :raises ValueError: if the expressions have mismatching shapes. + :returns: New gem Node constructed from op. + + Each expression must either have the same shape, or else be + scalar. Shaped expressions are indexed, the op is applied to the + scalar expressions and the result is wrapped up in a ComponentTensor. + + """ + shapes = set(e.shape for e in exprs) + if len(shapes - {()}) > 1: + raise ValueError("expressions must have matching shape (or else be scalar)") + shape = max(shapes) + i = indices(len(shape)) + exprs = tuple(Indexed(e, i) if e.shape else e for e in exprs) + return ComponentTensor(op(*exprs), i) + + +def as_gem(expr): + """Attempt to convert an expression into GEM of scalar type. + + Parameters + ---------- + expr : Node or Number + The expression. + + Returns + ------- + Node + A GEM representation of the expression. + + Raises + ------ + ValueError + If conversion was not possible. + + """ + if isinstance(expr, Node): + return expr + elif isinstance(expr, Number): + return Literal(expr) + else: + raise ValueError("Do not know how to convert %r to GEM" % expr) + + +def as_gem_uint(expr): + """Attempt to convert an expression into GEM of uint type. + + Parameters + ---------- + expr : Node or Integral + The expression. + + Returns + ------- + Node + A GEM representation of the expression. + + Raises + ------ + ValueError + If conversion was not possible. + + """ + if isinstance(expr, Node): + return expr + elif isinstance(expr, Integral): + return Literal(expr, dtype=uint_type) + else: + raise ValueError("Do not know how to convert %r to GEM" % expr) + + +def extract_type(expressions, klass): + """Collects objects of type klass in expressions.""" + return tuple(node for node in traversal(expressions) if isinstance(node, klass)) diff --git a/gem/impero.py b/gem/impero.py new file mode 100644 index 000000000..c909e1bfc --- /dev/null +++ b/gem/impero.py @@ -0,0 +1,159 @@ +"""Impero is a helper AST for generating C code (or equivalent, +e.g. COFFEE) from GEM. An Impero expression is a proper tree, not +directed acyclic graph (DAG). Impero is a helper AST, not a +standalone language; it is incomplete without GEM as its terminals +refer to nodes from GEM expressions. + +Trivia: + - Impero helps translating GEM into an imperative language. + - Byzantine units in Age of Empires II sometimes say 'Impero?' + (Command?) after clicking on them. +""" + +from abc import ABCMeta, abstractmethod + +from gem.node import Node as NodeBase + + +class Node(NodeBase): + """Base class of all Impero nodes""" + + __slots__ = () + + +class Terminal(Node, metaclass=ABCMeta): + """Abstract class for terminal Impero nodes""" + + __slots__ = () + + children = () + + @abstractmethod + def loop_shape(self, free_indices): + """Gives the loop shape, an ordering of indices for an Impero + terminal. + + :arg free_indices: a callable mapping of GEM expressions to + ordered free indices. 
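+        :returns: an ordering (tuple) of indices describing the loop
+                  nest for this terminal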
+ """ + pass + + +class Evaluate(Terminal): + """Assign the value of a GEM expression to a temporary.""" + + __slots__ = ('expression',) + __front__ = ('expression',) + + def __init__(self, expression): + self.expression = expression + + def loop_shape(self, free_indices): + return free_indices(self.expression) + + +class Initialise(Terminal): + """Initialise an :class:`gem.IndexSum`.""" + + __slots__ = ('indexsum',) + __front__ = ('indexsum',) + + def __init__(self, indexsum): + self.indexsum = indexsum + + def loop_shape(self, free_indices): + return free_indices(self.indexsum) + + +class Accumulate(Terminal): + """Accumulate terms into an :class:`gem.IndexSum`.""" + + __slots__ = ('indexsum',) + __front__ = ('indexsum',) + + def __init__(self, indexsum): + self.indexsum = indexsum + + def loop_shape(self, free_indices): + return free_indices(self.indexsum.children[0]) + + +class Noop(Terminal): + """No-op terminal. Does not generate code, but wraps a GEM + expression to have a loop shape, thus affects loop fusion.""" + + __slots__ = ('expression',) + __front__ = ('expression',) + + def __init__(self, expression): + self.expression = expression + + def loop_shape(self, free_indices): + return free_indices(self.expression) + + +class Return(Terminal): + """Save value of GEM expression into an lvalue. Used to "return" + values from a kernel.""" + + __slots__ = ('variable', 'expression') + __front__ = ('variable', 'expression') + + def __init__(self, variable, expression): + assert set(variable.free_indices) >= set(expression.free_indices) + + self.variable = variable + self.expression = expression + + def loop_shape(self, free_indices): + return free_indices(self.variable) + + +class ReturnAccumulate(Terminal): + """Accumulate an :class:`gem.IndexSum` directly into a return + variable.""" + + __slots__ = ('variable', 'indexsum') + __front__ = ('variable', 'indexsum') + + def __init__(self, variable, indexsum): + assert set(variable.free_indices) == set(indexsum.free_indices) + + self.variable = variable + self.indexsum = indexsum + + def loop_shape(self, free_indices): + return free_indices(self.indexsum.children[0]) + + +class Block(Node): + """An ordered set of Impero expressions. Corresponds to a curly + braces block in C.""" + + __slots__ = ('children',) + + def __init__(self, statements): + self.children = tuple(statements) + + +class For(Node): + """For loop with an index which stores its extent, and a loop body + expression which is usually a :class:`Block`.""" + + __slots__ = ('index', 'children') + __front__ = ('index',) + + def __new__(cls, index, statement): + # In case of an empty loop, create a Noop instead. + # Related: https://github.com/coneoproject/COFFEE/issues/98 + assert isinstance(statement, Block) + if not statement.children: + # This "works" because the loop_shape of this node is not + # asked any more. + return Noop(None) + else: + return super(For, cls).__new__(cls) + + def __init__(self, index, statement): + self.index = index + self.children = (statement,) diff --git a/gem/impero_utils.py b/gem/impero_utils.py new file mode 100644 index 000000000..31f9565bb --- /dev/null +++ b/gem/impero_utils.py @@ -0,0 +1,322 @@ +"""Utilities for building an Impero AST from an ordered list of +terminal Impero operations, and for building any additional data +required for straightforward C code generation. + +What this module does is independent of the generated code target. 
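+
+A typical entry point is :func:`compile_gem` (illustrative sketch; the
+``assignments`` list of (return variable, expression) pairs is supplied by
+the caller)::
+
+    impero_c = compile_gem(assignments, prefix_ordering=())
+    # impero_c.tree is the Impero AST; temporaries, declare and indices
+    # carry the remaining data needed for code generation.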
+""" + +import collections +from functools import singledispatch +from itertools import chain, groupby + +from gem.node import traversal, collect_refcount +from gem import gem, impero as imp, optimise, scheduling + + +# ImperoC is named tuple for C code generation. +# +# Attributes: +# tree - Impero AST describing the loop structure and operations +# temporaries - List of GEM expressions which have assigned temporaries +# declare - Where to declare temporaries to get correct C code +# indices - Indices for declarations and referencing values +ImperoC = collections.namedtuple('ImperoC', ['tree', 'temporaries', 'declare', 'indices']) + + +class NoopError(Exception): + """No operations in the kernel.""" + pass + + +def preprocess_gem(expressions, replace_delta=True, remove_componenttensors=True): + """Lower GEM nodes that cannot be translated to C directly.""" + if remove_componenttensors: + expressions = optimise.remove_componenttensors(expressions) + if replace_delta: + expressions = optimise.replace_delta(expressions) + return expressions + + +def compile_gem(assignments, prefix_ordering, remove_zeros=False, + emit_return_accumulate=True): + """Compiles GEM to Impero. + + :arg assignments: list of (return variable, expression DAG root) pairs + :arg prefix_ordering: outermost loop indices + :arg remove_zeros: remove zero assignment to return variables + :arg emit_return_accumulate: emit ReturnAccumulate nodes (see + :func:`~.scheduling.emit_operations`)? If False, + split into Accumulate/Return pairs. Set to False if the + output tensor of kernels is not guaranteed to be zero on entry. + """ + # Remove zeros + if remove_zeros: + def nonzero(assignment): + variable, expression = assignment + return not isinstance(expression, gem.Zero) + assignments = list(filter(nonzero, assignments)) + + # Just the expressions + expressions = [expression for variable, expression in assignments] + + # Collect indices in a deterministic order + indices = list(collections.OrderedDict.fromkeys(chain.from_iterable( + node.index_ordering() + for node in traversal(expressions) + if isinstance(node, (gem.Indexed, gem.FlexiblyIndexed)) + ))) + + # Build ordered index map + index_ordering = make_prefix_ordering(indices, prefix_ordering) + apply_ordering = make_index_orderer(index_ordering) + + get_indices = lambda expr: apply_ordering(expr.free_indices) + + # Build operation ordering + ops = scheduling.emit_operations(assignments, get_indices, emit_return_accumulate) + + # Empty kernel + if len(ops) == 0: + raise NoopError() + + # Drop unnecessary temporaries + ops = inline_temporaries(expressions, ops) + + # Build Impero AST + tree = make_loop_tree(ops, get_indices) + + # Collect temporaries + temporaries = collect_temporaries(tree) + + # Determine declarations + declare, indices = place_declarations(tree, temporaries, get_indices) + + # Prepare ImperoC (Impero AST + other data for code generation) + return ImperoC(tree, temporaries, declare, indices) + + +def make_prefix_ordering(indices, prefix_ordering): + """Creates an ordering of ``indices`` which starts with those + indices in ``prefix_ordering``.""" + # Need to return deterministically ordered indices + return tuple(prefix_ordering) + tuple(k for k in indices if k not in prefix_ordering) + + +def make_index_orderer(index_ordering): + """Returns a function which given a set of indices returns those + indices in the order as they appear in ``index_ordering``.""" + idx2pos = {idx: pos for pos, idx in enumerate(index_ordering)} + + def apply_ordering(indices): + 
return tuple(sorted(indices, key=lambda i: idx2pos[i])) + return apply_ordering + + +def inline_temporaries(expressions, ops): + """Inline temporaries which could be inlined without blowing up + the code. + + :arg expressions: a multi-root GEM expression DAG, used for + reference counting + :arg ops: ordered list of Impero terminals + :returns: a filtered ``ops``, without the unnecessary + :class:`impero.Evaluate`s + """ + refcount = collect_refcount(expressions) + + candidates = set() # candidates for inlining + for op in ops: + if isinstance(op, imp.Evaluate): + expr = op.expression + if expr.shape == () and refcount[expr] == 1: + candidates.add(expr) + + # Prevent inlining that pulls expressions into inner loops + for node in traversal(expressions): + for child in node.children: + if child in candidates and set(child.free_indices) < set(node.free_indices): + candidates.remove(child) + + # Filter out candidates + return [op for op in ops if not (isinstance(op, imp.Evaluate) and op.expression in candidates)] + + +def collect_temporaries(tree): + """Collects GEM expressions to assign to temporaries from a list + of Impero terminals.""" + result = [] + for node in traversal((tree,)): + # IndexSum temporaries should be added either at Initialise or + # at Accumulate. The difference is only in ordering + # (numbering). We chose Accumulate here. + if isinstance(node, imp.Accumulate): + result.append(node.indexsum) + elif isinstance(node, imp.Evaluate): + result.append(node.expression) + return result + + +def make_loop_tree(ops, get_indices, level=0): + """Creates an Impero AST with loops from a list of operations and + their respective free indices. + + :arg ops: a list of Impero terminal nodes + :arg get_indices: callable mapping from GEM nodes to an ordering + of free indices + :arg level: depth of loop nesting + :returns: Impero AST with loops, without declarations + """ + keyfunc = lambda op: op.loop_shape(get_indices)[level:level+1] + statements = [] + for first_index, op_group in groupby(ops, keyfunc): + if first_index: + inner_block = make_loop_tree(op_group, get_indices, level+1) + statements.append(imp.For(first_index[0], inner_block)) + else: + statements.extend(op_group) + # Remove no-op terminals from the tree + statements = [s for s in statements if not isinstance(s, imp.Noop)] + return imp.Block(statements) + + +def place_declarations(tree, temporaries, get_indices): + """Determines where and how to declare temporaries for an Impero AST. + + :arg tree: Impero AST to determine the declarations for + :arg temporaries: list of GEM expressions which are assigned to + temporaries + :arg get_indices: callable mapping from GEM nodes to an ordering + of free indices + """ + numbering = {t: n for n, t in enumerate(temporaries)} + assert len(numbering) == len(temporaries) + + # Collect the total number of temporary references + total_refcount = collections.Counter() + for node in traversal((tree,)): + if isinstance(node, imp.Terminal): + total_refcount.update(temp_refcount(numbering, node)) + assert set(total_refcount) == set(temporaries) + + # Result + declare = {} + indices = {} + + @singledispatch + def recurse(expr, loop_indices): + """Visit an Impero AST to collect declarations. 
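+
+        The main work happens in the handler for blocks: a temporary is
+        declared at the beginning of the innermost block that contains
+        all references to it.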
+ + :arg expr: Impero tree node + :arg loop_indices: loop indices (in order) from the outer + loops surrounding ``expr`` + :returns: :class:`collections.Counter` with the reference + counts for each temporary in the subtree whose root + is ``expr`` + """ + return AssertionError("unsupported expression type %s" % type(expr)) + + @recurse.register(imp.Terminal) + def recurse_terminal(expr, loop_indices): + return temp_refcount(numbering, expr) + + @recurse.register(imp.For) + def recurse_for(expr, loop_indices): + return recurse(expr.children[0], loop_indices + (expr.index,)) + + @recurse.register(imp.Block) + def recurse_block(expr, loop_indices): + # Temporaries declared at the beginning of the block are + # collected here + declare[expr] = [] + + # Collect reference counts for the block + refcount = collections.Counter() + for statement in expr.children: + refcount.update(recurse(statement, loop_indices)) + + # Visit :class:`collections.Counter` in deterministic order + for e in sorted(refcount.keys(), key=lambda t: numbering[t]): + if refcount[e] == total_refcount[e]: + # If all references are within this block, then this + # block is the right place to declare the temporary. + assert loop_indices == get_indices(e)[:len(loop_indices)] + indices[e] = get_indices(e)[len(loop_indices):] + if indices[e]: + # Scalar-valued temporaries are not declared until + # their value is assigned. This does not really + # matter, but produces a more compact and nicer to + # read C code. + declare[expr].append(e) + # Remove expression from the ``refcount`` so it will + # not be declared again. + del refcount[e] + return refcount + + # Populate result + remainder = recurse(tree, ()) + assert not remainder + + # Set in ``declare`` for Impero terminals whether they should + # declare the temporary that they are writing to. + for node in traversal((tree,)): + if isinstance(node, imp.Terminal): + declare[node] = False + if isinstance(node, imp.Evaluate): + e = node.expression + elif isinstance(node, imp.Initialise): + e = node.indexsum + else: + continue + + if len(indices[e]) == 0: + declare[node] = True + + return declare, indices + + +def temp_refcount(temporaries, op): + """Collects the number of times temporaries are referenced when + generating code for an Impero terminal. + + :arg temporaries: set of temporaries + :arg op: Impero terminal + :returns: :class:`collections.Counter` object mapping some of + elements from ``temporaries`` to the number of times + they will referenced from ``op`` + """ + counter = collections.Counter() + + def recurse(o): + """Traverses expression until reaching temporaries, counting + temporary references.""" + if o in temporaries: + counter[o] += 1 + else: + for c in o.children: + recurse(c) + + def recurse_top(o): + """Traverses expression until reaching temporaries, counting + temporary references. 
Always descends into children at least + once, even when the root is a temporary.""" + if o in temporaries: + counter[o] += 1 + for c in o.children: + recurse(c) + + if isinstance(op, imp.Initialise): + counter[op.indexsum] += 1 + elif isinstance(op, imp.Accumulate): + recurse_top(op.indexsum) + elif isinstance(op, imp.Evaluate): + recurse_top(op.expression) + elif isinstance(op, imp.Return): + recurse(op.expression) + elif isinstance(op, imp.ReturnAccumulate): + recurse(op.indexsum.children[0]) + elif isinstance(op, imp.Noop): + pass + else: + raise AssertionError("unhandled operation: %s" % type(op)) + + return counter diff --git a/gem/interpreter.py b/gem/interpreter.py new file mode 100644 index 000000000..13eeb44a2 --- /dev/null +++ b/gem/interpreter.py @@ -0,0 +1,362 @@ +""" +An interpreter for GEM trees. +""" +import numpy +import operator +from collections import OrderedDict +from functools import singledispatch +import itertools + +from gem import gem, node +from gem.optimise import replace_delta + +__all__ = ("evaluate", ) + + +class Result(object): + """An array object that tracks which axes of the array correspond to + gem free indices (and what those free indices are). + + :arg arr: The array. + :arg fids: The free indices. + + The first ``len(fids)`` axes of the provided array correspond to + the free indices, the remaining axes are the shape of each entry. + """ + def __init__(self, arr, fids=None): + self.arr = arr + self.fids = fids if fids is not None else () + + def broadcast(self, fids): + """Given some free indices, return a broadcasted array which + contains extra dimensions that correspond to indices in fids + that are not in ``self.fids``. + + Note that inserted dimensions will have length one. + + :arg fids: The free indices for broadcasting. + """ + # Select free indices + axes = tuple(self.fids.index(fi) for fi in fids if fi in self.fids) + assert len(axes) == len(self.fids) + # Add shape + axes += tuple(range(len(self.fids), self.arr.ndim)) + # Move axes, insert extra axes + arr = numpy.transpose(self.arr, axes) + for i, fi in enumerate(fids): + if fi not in self.fids: + arr = numpy.expand_dims(arr, axis=i) + return arr + + def filter(self, idx, fids): + """Given an index tuple and some free indices, return a + "filtered" index tuple which removes entries that correspond + to indices in fids that are not in ``self.fids``. + + :arg idx: The index tuple to filter. + :arg fids: The free indices for the index tuple. + """ + return tuple(idx[fids.index(i)] for i in self.fids) + idx[len(fids):] + + def __getitem__(self, idx): + return self.arr[tuple(idx)] + + def __setitem__(self, idx, val): + self.arr[idx] = val + + @property + def tshape(self): + """The total shape of the result array.""" + return self.arr.shape + + @property + def fshape(self): + """The shape of the free index part of the result array.""" + return self.tshape[:len(self.fids)] + + @property + def shape(self): + """The shape of the shape part of the result array.""" + return self.tshape[len(self.fids):] + + def __repr__(self): + return "Result(%r, %r)" % (self.arr, self.fids) + + def __str__(self): + return repr(self) + + @classmethod + def empty(cls, *children, **kwargs): + """Build an empty Result object. + + :arg children: The children used to determine the shape and + free indices. + :kwarg dtype: The data type of the result array. 
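+        :returns: a :class:`Result` whose array is shaped by the union
+                  of the children's free indices followed by their
+                  (common) shape, e.g. ``Result.empty(a, b, dtype=bool)``.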
+ """ + dtype = kwargs.get("dtype", float) + assert all(children[0].shape == c.shape for c in children) + fids = [] + for f in itertools.chain(*(c.fids for c in children)): + if f not in fids: + fids.append(f) + shape = tuple(i.extent for i in fids) + children[0].shape + return cls(numpy.empty(shape, dtype=dtype), tuple(fids)) + + +@singledispatch +def _evaluate(expression, self): + """Evaluate an expression using a provided callback handler. + + :arg expression: The expression to evaluation. + :arg self: The callback handler (should provide bindings). + """ + raise ValueError("Unhandled node type %s" % type(expression)) + + +@_evaluate.register(gem.Zero) +def _evaluate_zero(e, self): + """Zeros produce an array of zeros.""" + return Result(numpy.zeros(e.shape, dtype=float)) + + +@_evaluate.register(gem.Failure) +def _evaluate_failure(e, self): + """Failure nodes produce NaNs.""" + return Result(numpy.full(e.shape, numpy.nan, dtype=float)) + + +@_evaluate.register(gem.Constant) +def _evaluate_constant(e, self): + """Constants return their array.""" + return Result(e.array) + + +@_evaluate.register(gem.Delta) +def _evaluate_delta(e, self): + """Lower delta and evaluate.""" + e, = replace_delta((e,)) + return self(e) + + +@_evaluate.register(gem.Variable) +def _evaluate_variable(e, self): + """Look up variables in the provided bindings.""" + try: + val = self.bindings[e] + except KeyError: + raise ValueError("Binding for %s not found" % e) + if val.shape != e.shape: + raise ValueError("Binding for %s has wrong shape. %s, not %s." % + (e, val.shape, e.shape)) + return Result(val) + + +@_evaluate.register(gem.Power) +@_evaluate.register(gem.Division) +@_evaluate.register(gem.Product) +@_evaluate.register(gem.Sum) +def _evaluate_operator(e, self): + op = {gem.Product: operator.mul, + gem.Division: operator.truediv, + gem.Sum: operator.add, + gem.Power: operator.pow}[type(e)] + + a, b = [self(o) for o in e.children] + result = Result.empty(a, b) + fids = result.fids + result.arr = op(a.broadcast(fids), b.broadcast(fids)) + return result + + +@_evaluate.register(gem.MathFunction) +def _evaluate_mathfunction(e, self): + ops = [self(o) for o in e.children] + result = Result.empty(*ops) + names = { + "abs": abs, + "log": numpy.log, + "real": operator.attrgetter("real"), + "imag": operator.attrgetter("imag"), + "conj": operator.methodcaller("conjugate"), + } + op = names[e.name] + for idx in numpy.ndindex(result.tshape): + result[idx] = op(*(o[o.filter(idx, result.fids)] for o in ops)) + return result + + +@_evaluate.register(gem.MaxValue) +@_evaluate.register(gem.MinValue) +def _evaluate_minmaxvalue(e, self): + ops = [self(o) for o in e.children] + result = Result.empty(*ops) + op = {gem.MinValue: min, + gem.MaxValue: max}[type(e)] + for idx in numpy.ndindex(result.tshape): + result[idx] = op(*(o[o.filter(idx, result.fids)] for o in ops)) + return result + + +@_evaluate.register(gem.Comparison) +def _evaluate_comparison(e, self): + ops = [self(o) for o in e.children] + op = {">": operator.gt, + ">=": operator.ge, + "==": operator.eq, + "!=": operator.ne, + "<": operator.lt, + "<=": operator.le}[e.operator] + result = Result.empty(*ops, dtype=bool) + for idx in numpy.ndindex(result.tshape): + result[idx] = op(*(o[o.filter(idx, result.fids)] for o in ops)) + return result + + +@_evaluate.register(gem.LogicalNot) +def _evaluate_logicalnot(e, self): + val = self(e.children[0]) + assert val.arr.dtype == numpy.dtype("bool") + result = Result.empty(val, bool) + for idx in numpy.ndindex(result.tshape): + 
result[idx] = not val[val.filter(idx, result.fids)] + return result + + +@_evaluate.register(gem.LogicalAnd) +def _evaluate_logicaland(e, self): + a, b = [self(o) for o in e.children] + assert a.arr.dtype == numpy.dtype("bool") + assert b.arr.dtype == numpy.dtype("bool") + result = Result.empty(a, b, bool) + for idx in numpy.ndindex(result.tshape): + result[idx] = a[a.filter(idx, result.fids)] and \ + b[b.filter(idx, result.fids)] + return result + + +@_evaluate.register(gem.LogicalOr) +def _evaluate_logicalor(e, self): + a, b = [self(o) for o in e.children] + assert a.arr.dtype == numpy.dtype("bool") + assert b.arr.dtype == numpy.dtype("bool") + result = Result.empty(a, b, dtype=bool) + for idx in numpy.ndindex(result.tshape): + result[idx] = a[a.filter(idx, result.fids)] or \ + b[b.filter(idx, result.fids)] + return result + + +@_evaluate.register(gem.Conditional) +def _evaluate_conditional(e, self): + cond, then, else_ = [self(o) for o in e.children] + assert cond.arr.dtype == numpy.dtype("bool") + result = Result.empty(cond, then, else_) + for idx in numpy.ndindex(result.tshape): + if cond[cond.filter(idx, result.fids)]: + result[idx] = then[then.filter(idx, result.fids)] + else: + result[idx] = else_[else_.filter(idx, result.fids)] + return result + + +@_evaluate.register(gem.Indexed) +def _evaluate_indexed(e, self): + """Indexing maps shape to free indices""" + val = self(e.children[0]) + fids = tuple(i for i in e.multiindex if isinstance(i, gem.Index)) + + idx = [] + # First pick up all the existing free indices + for _ in val.fids: + idx.append(slice(None)) + # Now grab the shape axes + for i in e.multiindex: + if isinstance(i, gem.Index): + # Free index, want entire extent + idx.append(slice(None)) + elif isinstance(i, gem.VariableIndex): + # Variable index, evaluate inner expression + result, = self(i.expression) + assert not result.tshape + idx.append(result[()]) + else: + # Fixed index, just pick that value + idx.append(i) + assert len(idx) == len(val.tshape) + return Result(val[idx], val.fids + fids) + + +@_evaluate.register(gem.ComponentTensor) +def _evaluate_componenttensor(e, self): + """Component tensors map free indices to shape.""" + val = self(e.children[0]) + axes = [] + fids = [] + # First grab the free indices that aren't bound + for a, f in enumerate(val.fids): + if f not in e.multiindex: + axes.append(a) + fids.append(f) + # Now the bound free indices + for i in e.multiindex: + axes.append(val.fids.index(i)) + # Now the existing shape + axes.extend(range(len(val.fshape), len(val.tshape))) + return Result(numpy.transpose(val.arr, axes=axes), + tuple(fids)) + + +@_evaluate.register(gem.IndexSum) +def _evaluate_indexsum(e, self): + """Index sums reduce over the given axis.""" + val = self(e.children[0]) + idx = tuple(map(val.fids.index, e.multiindex)) + rfids = tuple(fi for fi in val.fids if fi not in e.multiindex) + return Result(val.arr.sum(axis=idx), rfids) + + +@_evaluate.register(gem.ListTensor) +def _evaluate_listtensor(e, self): + """List tensors just turn into arrays.""" + ops = [self(o) for o in e.children] + tmp = Result.empty(*ops) + arrs = [numpy.broadcast_to(o.broadcast(tmp.fids), tmp.fshape) for o in ops] + arrs = numpy.moveaxis(numpy.asarray(arrs), 0, -1).reshape(tmp.fshape + e.shape) + return Result(arrs, tmp.fids) + + +@_evaluate.register(gem.Concatenate) +def _evaluate_concatenate(e, self): + """Concatenate nodes flatten and concatenate shapes.""" + ops = [self(o) for o in e.children] + fids = tuple(OrderedDict.fromkeys(itertools.chain(*(o.fids for o 
in ops)))) + fshape = tuple(i.extent for i in fids) + arrs = [] + for o in ops: + # Create temporary with correct shape + arr = numpy.empty(fshape + o.shape) + # Broadcast for extra free indices + arr[:] = o.broadcast(fids) + # Flatten shape + arr = arr.reshape(arr.shape[:arr.ndim-len(o.shape)] + (-1,)) + arrs.append(arr) + arrs = numpy.concatenate(arrs, axis=-1) + return Result(arrs, fids) + + +def evaluate(expressions, bindings=None): + """Evaluate some GEM expressions given variable bindings. + + :arg expressions: A single GEM expression, or iterable of + expressions to evaluate. + :kwarg bindings: An optional dict mapping GEM :class:`gem.Variable` + nodes to data. + :returns: a list of the evaluated expressions. + """ + try: + exprs = tuple(expressions) + except TypeError: + exprs = (expressions, ) + mapper = node.Memoizer(_evaluate) + mapper.bindings = bindings if bindings is not None else {} + return list(map(mapper, exprs)) diff --git a/gem/node.py b/gem/node.py new file mode 100644 index 000000000..5d9c5bf04 --- /dev/null +++ b/gem/node.py @@ -0,0 +1,281 @@ +"""Generic abstract node class and utility functions for creating +expression DAG languages.""" + +import collections +import gem + + +class Node(object): + """Abstract node class. + + Nodes are not meant to be modified. + + A node can reference other nodes; they are called children. A node + might contain data, or reference other objects which are not + themselves nodes; they are not called children. + + Both the children (if any) and non-child data (if any) are + required to create a node, or determine the equality of two + nodes. For reconstruction, however, only the new children are + necessary. + """ + + __slots__ = ('hash_value',) + + # Non-child data as the first arguments of the constructor. + # To be (potentially) overridden by derived node classes. + __front__ = () + + # Non-child data as the last arguments of the constructor. + # To be (potentially) overridden by derived node classes. + __back__ = () + + def _cons_args(self, children): + """Constructs an argument list for the constructor with + non-child data from 'self' and children from 'children'. + + Internally used utility function. + """ + front_args = [getattr(self, name) for name in self.__front__] + back_args = [getattr(self, name) for name in self.__back__] + + return tuple(front_args) + tuple(children) + tuple(back_args) + + def __reduce__(self): + # Gold version: + return type(self), self._cons_args(self.children) + + def reconstruct(self, *args): + """Reconstructs the node with new children from + 'args'. Non-child data are copied from 'self'. + + Returns a new object. + """ + return type(self)(*self._cons_args(args)) + + def __repr__(self): + cons_args = self._cons_args(self.children) + return "%s(%s)" % (type(self).__name__, ", ".join(map(repr, cons_args))) + + def __eq__(self, other): + """Provides equality testing with quick positive and negative + paths based on :func:`id` and :meth:`__hash__`. + """ + if self is other: + return True + elif hash(self) != hash(other): + return False + else: + return self.is_equal(other) + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + """Provides caching for hash values.""" + try: + return self.hash_value + except AttributeError: + self.hash_value = self.get_hash() + return self.hash_value + + def is_equal(self, other): + """Equality predicate. + + This is the method to potentially override in derived classes, + not :meth:`__eq__` or :meth:`__ne__`. 
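+
+        Subclasses may also use this hook to merge equal instances: for
+        example, ``ListTensor.is_equal`` in :mod:`gem.gem` assigns
+        ``self.array = other.array`` when two tensors compare equal.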
+ """ + if type(self) is not type(other): + return False + self_consargs = self._cons_args(self.children) + other_consargs = other._cons_args(other.children) + return self_consargs == other_consargs + + def get_hash(self): + """Hash function. + + This is the method to potentially override in derived classes, + not :meth:`__hash__`. + """ + return hash((type(self),) + self._cons_args(self.children)) + + +def _make_traversal_children(node): + if isinstance(node, (gem.Indexed, gem.FlexiblyIndexed)): + # Include child nodes hidden in index expressions. + return node.children + node.indirect_children + else: + return node.children + + +def pre_traversal(expression_dags): + """Pre-order traversal of the nodes of expression DAGs. + + Notes + ----- + This function also walks through nodes in index expressions + (e.g., `VariableIndex`s); see ``_make_traversal_children()``. + + """ + seen = set() + lifo = [] + # Some roots might be same, but they must be visited only once. + # Keep the original ordering of roots, for deterministic code + # generation. + for root in expression_dags: + if root not in seen: + seen.add(root) + lifo.append(root) + + while lifo: + node = lifo.pop() + yield node + children = _make_traversal_children(node) + for child in reversed(children): + if child not in seen: + seen.add(child) + lifo.append(child) + + +def post_traversal(expression_dags): + """Post-order traversal of the nodes of expression DAGs. + + Notes + ----- + This function also walks through nodes in index expressions + (e.g., `VariableIndex`s); see ``_make_traversal_children()``. + + + """ + seen = set() + lifo = [] + # Some roots might be same, but they must be visited only once. + # Keep the original ordering of roots, for deterministic code + # generation. + for root in expression_dags: + if root not in seen: + seen.add(root) + lifo.append((root, list(_make_traversal_children(root)))) + + while lifo: + node, deps = lifo[-1] + for i, dep in enumerate(deps): + if dep is not None and dep not in seen: + lifo.append((dep, list(_make_traversal_children(dep)))) + deps[i] = None + break + else: + yield node + seen.add(node) + lifo.pop() + + +# Default to the more efficient pre-order traversal +traversal = pre_traversal + + +def collect_refcount(expression_dags): + """Collects reference counts for a multi-root expression DAG. + + Notes + ----- + This function also collects reference counts of nodes + in index expressions (e.g., `VariableIndex`s); see + ``_make_traversal_children()``. + + """ + result = collections.Counter(expression_dags) + for node in traversal(expression_dags): + result.update(_make_traversal_children(node)) + return result + + +def noop_recursive(function): + """No-op wrapper for functions with overridable recursive calls. + + :arg function: a function with parameters (value, rec), where + ``rec`` is expected to be a function used for + recursive calls. + :returns: a function with working recursion and nothing fancy + """ + def recursive(node): + return function(node, recursive) + return recursive + + +def noop_recursive_arg(function): + """No-op wrapper for functions with overridable recursive calls + and an argument. + + :arg function: a function with parameters (value, rec, arg), where + ``rec`` is expected to be a function used for + recursive calls. 
+ :returns: a function with working recursion and nothing fancy + """ + def recursive(node, arg): + return function(node, recursive, arg) + return recursive + + +class Memoizer(object): + """Caching wrapper for functions with overridable recursive calls. + The lifetime of the cache is the lifetime of the object instance. + + :arg function: a function with parameters (value, rec), where + ``rec`` is expected to be a function used for + recursive calls. + :returns: a function with working recursion and caching + """ + def __init__(self, function): + self.cache = {} + self.function = function + + def __call__(self, node): + try: + return self.cache[node] + except KeyError: + result = self.function(node, self) + self.cache[node] = result + return result + + +class MemoizerArg(object): + """Caching wrapper for functions with overridable recursive calls + and an argument. The lifetime of the cache is the lifetime of the + object instance. + + :arg function: a function with parameters (value, rec, arg), where + ``rec`` is expected to be a function used for + recursive calls. + :returns: a function with working recursion and caching + """ + def __init__(self, function): + self.cache = {} + self.function = function + + def __call__(self, node, arg): + cache_key = (node, arg) + try: + return self.cache[cache_key] + except KeyError: + result = self.function(node, self, arg) + self.cache[cache_key] = result + return result + + +def reuse_if_untouched(node, self): + """Reuse if untouched recipe""" + new_children = list(map(self, node.children)) + if all(nc == c for nc, c in zip(new_children, node.children)): + return node + else: + return node.reconstruct(*new_children) + + +def reuse_if_untouched_arg(node, self, arg): + """Reuse if touched recipe propagating an extra argument""" + new_children = [self(child, arg) for child in node.children] + if all(nc == c for nc, c in zip(new_children, node.children)): + return node + else: + return node.reconstruct(*new_children) diff --git a/gem/optimise.py b/gem/optimise.py new file mode 100644 index 000000000..7d6c8ecd6 --- /dev/null +++ b/gem/optimise.py @@ -0,0 +1,698 @@ +"""A set of routines implementing various transformations on GEM +expressions.""" + +from collections import OrderedDict, defaultdict +from functools import singledispatch, partial, reduce +from itertools import combinations, permutations, zip_longest +from numbers import Integral + +import numpy + +from gem.utils import groupby +from gem.node import (Memoizer, MemoizerArg, reuse_if_untouched, + reuse_if_untouched_arg, traversal) +from gem.gem import (Node, Failure, Identity, Literal, Zero, + Product, Sum, Comparison, Conditional, Division, + Index, VariableIndex, Indexed, FlexiblyIndexed, + IndexSum, ComponentTensor, ListTensor, Delta, + partial_indexed, one) + + +@singledispatch +def literal_rounding(node, self): + """Perform FFC rounding of FIAT tabulation matrices on the literals of + a GEM expression. + + :arg node: root of the expression + :arg self: function for recursive calls + """ + raise AssertionError("cannot handle type %s" % type(node)) + + +literal_rounding.register(Node)(reuse_if_untouched) + + +@literal_rounding.register(Literal) +def literal_rounding_literal(node, self): + table = node.array + epsilon = self.epsilon + # Mimic the rounding applied at COFFEE formatting, which in turn + # mimics FFC formatting. 
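+    # Illustrative: with epsilon = 1e-12 a tabulated 0.49999999999999994
+    # is snapped to 0.5, while entries further than epsilon away from a
+    # one-decimal value are left untouched.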
+ one_decimal = numpy.asarray(numpy.round(table, 1)) + one_decimal[numpy.logical_not(one_decimal)] = 0 # no minus zeros + return Literal(numpy.where(abs(table - one_decimal) < epsilon, one_decimal, table)) + + +def ffc_rounding(expression, epsilon): + """Perform FFC rounding of FIAT tabulation matrices on the literals of + a GEM expression. + + :arg expression: GEM expression + :arg epsilon: tolerance limit for rounding + """ + mapper = Memoizer(literal_rounding) + mapper.epsilon = epsilon + return mapper(expression) + + +@singledispatch +def _replace_division(node, self): + """Replace division with multiplication + + :param node: root of expression + :param self: function for recursive calls + """ + raise AssertionError("cannot handle type %s" % type(node)) + + +_replace_division.register(Node)(reuse_if_untouched) + + +@_replace_division.register(Division) +def _replace_division_division(node, self): + a, b = node.children + return Product(self(a), Division(one, self(b))) + + +def replace_division(expressions): + """Replace divisions with multiplications in expressions""" + mapper = Memoizer(_replace_division) + return list(map(mapper, expressions)) + + +@singledispatch +def replace_indices(node, self, subst): + """Replace free indices in a GEM expression. + + :arg node: root of the expression + :arg self: function for recursive calls + :arg subst: tuple of pairs; each pair is a substitution + rule with a free index to replace and an index to + replace with. + """ + raise AssertionError("cannot handle type %s" % type(node)) + + +replace_indices.register(Node)(reuse_if_untouched_arg) + + +def _replace_indices_atomic(i, self, subst): + if isinstance(i, VariableIndex): + new_expr = self(i.expression, subst) + return i if new_expr == i.expression else VariableIndex(new_expr) + else: + substitute = dict(subst) + return substitute.get(i, i) + + +@replace_indices.register(Delta) +def replace_indices_delta(node, self, subst): + i = _replace_indices_atomic(node.i, self, subst) + j = _replace_indices_atomic(node.j, self, subst) + if i == node.i and j == node.j: + return node + else: + return Delta(i, j) + + +@replace_indices.register(Indexed) +def replace_indices_indexed(node, self, subst): + child, = node.children + substitute = dict(subst) + multiindex = [] + for i in node.multiindex: + multiindex.append(_replace_indices_atomic(i, self, subst)) + if isinstance(child, ComponentTensor): + # Indexing into ComponentTensor + # Inline ComponentTensor and augment the substitution rules + substitute.update(zip(child.multiindex, multiindex)) + return self(child.children[0], tuple(sorted(substitute.items()))) + else: + # Replace indices + new_child = self(child, subst) + if new_child == child and multiindex == node.multiindex: + return node + else: + return Indexed(new_child, multiindex) + + +@replace_indices.register(FlexiblyIndexed) +def replace_indices_flexiblyindexed(node, self, subst): + child, = node.children + assert not child.free_indices + + dim2idxs = tuple( + ( + offset if isinstance(offset, Integral) else _replace_indices_atomic(offset, self, subst), + tuple((_replace_indices_atomic(i, self, subst), s if isinstance(s, Integral) else self(s, subst)) for i, s in idxs) + ) + for offset, idxs in node.dim2idxs + ) + + if dim2idxs == node.dim2idxs: + return node + else: + return FlexiblyIndexed(child, dim2idxs) + + +def filtered_replace_indices(node, self, subst): + """Wrapper for :func:`replace_indices`. 
At each call removes + substitution rules that do not apply.""" + if any(isinstance(k, VariableIndex) for k, _ in subst): + raise NotImplementedError("Can not replace VariableIndex (will need inverse)") + filtered_subst = tuple((k, v) for k, v in subst if k in node.free_indices) + return replace_indices(node, self, filtered_subst) + + +def remove_componenttensors(expressions): + """Removes all ComponentTensors in multi-root expression DAG.""" + mapper = MemoizerArg(filtered_replace_indices) + return [mapper(expression, ()) for expression in expressions] + + +@singledispatch +def _constant_fold_zero(node, self): + raise AssertionError("cannot handle type %s" % type(node)) + + +_constant_fold_zero.register(Node)(reuse_if_untouched) + + +@_constant_fold_zero.register(Literal) +def _constant_fold_zero_literal(node, self): + if (node.array == 0).all(): + # All zeros, make symbolic zero + return Zero(node.shape) + else: + return node + + +@_constant_fold_zero.register(ListTensor) +def _constant_fold_zero_listtensor(node, self): + new_children = list(map(self, node.children)) + if all(isinstance(nc, Zero) for nc in new_children): + return Zero(node.shape) + elif all(nc == c for nc, c in zip(new_children, node.children)): + return node + else: + return node.reconstruct(*new_children) + + +def constant_fold_zero(exprs): + """Produce symbolic zeros from Literals + + :arg exprs: An iterable of gem expressions. + :returns: A list of gem expressions where any Literal containing + only zeros is replaced by symbolic Zero of the appropriate + shape. + + We need a separate path for ListTensor so that its `reconstruct` + method will not be called when the new children are `Zero()`s; + otherwise Literal `0`s would be reintroduced. + """ + mapper = Memoizer(_constant_fold_zero) + return [mapper(e) for e in exprs] + + +def _select_expression(expressions, index): + """Helper function to select an expression from a list of + expressions with an index. This function expect sanitised input, + one should normally call :py:func:`select_expression` instead. 
+ + :arg expressions: a list of expressions + :arg index: an index (free, fixed or variable) + :returns: an expression + """ + expr = expressions[0] + if all(e == expr for e in expressions): + return expr + + types = set(map(type, expressions)) + if types <= {Indexed, Zero}: + multiindex, = set(e.multiindex for e in expressions if isinstance(e, Indexed)) + # Shape only determined by free indices + shape = tuple(i.extent for i in multiindex if isinstance(i, Index)) + + def child(expression): + if isinstance(expression, Indexed): + return expression.children[0] + elif isinstance(expression, Zero): + return Zero(shape) + return Indexed(_select_expression(list(map(child, expressions)), index), multiindex) + + if types <= {Literal, Zero, Failure}: + return partial_indexed(ListTensor(expressions), (index,)) + + if types <= {ComponentTensor, Zero}: + shape, = set(e.shape for e in expressions) + multiindex = tuple(Index(extent=d) for d in shape) + children = remove_componenttensors([Indexed(e, multiindex) for e in expressions]) + return ComponentTensor(_select_expression(children, index), multiindex) + + if len(types) == 1: + cls, = types + if cls.__front__ or cls.__back__: + raise NotImplementedError("How to factorise {} expressions?".format(cls.__name__)) + assert all(len(e.children) == len(expr.children) for e in expressions) + assert len(expr.children) > 0 + + return expr.reconstruct(*[_select_expression(nth_children, index) + for nth_children in zip(*[e.children + for e in expressions])]) + + raise NotImplementedError("No rule for factorising expressions of this kind.") + + +def select_expression(expressions, index): + """Select an expression from a list of expressions with an index. + Semantically equivalent to + + partial_indexed(ListTensor(expressions), (index,)) + + but has a much more optimised implementation. + + :arg expressions: a list of expressions of the same shape + :arg index: an index (free, fixed or variable) + :returns: an expression of the same shape as the given expressions + """ + # Check arguments + shape = expressions[0].shape + assert all(e.shape == shape for e in expressions) + + # Sanitise input expressions + alpha = tuple(Index() for s in shape) + exprs = remove_componenttensors([Indexed(e, alpha) for e in expressions]) + + # Factor the expressions recursively and convert result + selected = _select_expression(exprs, index) + return ComponentTensor(selected, alpha) + + +def delta_elimination(sum_indices, factors): + """IndexSum-Delta cancellation. 
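+
+    Schematically, a contraction ``sum_j Delta(i, j) * A[j]`` collapses
+    to ``A[i]``: the Delta factor reduces to one and ``j`` is removed
+    from the summation indices.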
+ + :arg sum_indices: free indices for contractions + :arg factors: product factors + :returns: optimised (sum_indices, factors) + """ + sum_indices = list(sum_indices) # copy for modification + + def substitute(expression, from_, to_): + if from_ not in expression.free_indices: + return expression + elif isinstance(expression, Delta): + mapper = MemoizerArg(filtered_replace_indices) + return mapper(expression, ((from_, to_),)) + else: + return Indexed(ComponentTensor(expression, (from_,)), (to_,)) + + delta_queue = [(f, index) + for f in factors if isinstance(f, Delta) + for index in (f.i, f.j) if index in sum_indices] + while delta_queue: + delta, from_ = delta_queue[0] + to_, = list({delta.i, delta.j} - {from_}) + + sum_indices.remove(from_) + + factors = [substitute(f, from_, to_) for f in factors] + + delta_queue = [(f, index) + for f in factors if isinstance(f, Delta) + for index in (f.i, f.j) if index in sum_indices] + + return sum_indices, factors + + +def associate(operator, operands): + """Apply associativity rules to construct an operation-minimal expression tree. + + For best performance give factors that have different set of free indices. + + :arg operator: associative binary operator + :arg operands: list of operands + + :returns: (reduced expression, # of floating-point operations) + """ + if len(operands) > 32: + # O(N^3) algorithm + raise NotImplementedError("Not expected such a complicated expression!") + + def count(pair): + """Operation count to reduce a pair of GEM expressions""" + a, b = pair + extents = [i.extent for i in set().union(a.free_indices, b.free_indices)] + return numpy.prod(extents, dtype=int) + + flops = 0 + while len(operands) > 1: + # Greedy algorithm: choose a pair of operands that are the + # cheapest to reduce. + a, b = min(combinations(operands, 2), key=count) + flops += count((a, b)) + # Remove chosen factors, append their product + operands.remove(a) + operands.remove(b) + operands.append(operator(a, b)) + result, = operands + return result, flops + + +def sum_factorise(sum_indices, factors): + """Optimise a tensor product through sum factorisation. + + :arg sum_indices: free indices for contractions + :arg factors: product factors + :returns: optimised GEM expression + """ + if len(factors) == 0 and len(sum_indices) == 0: + # Empty product + return one + + if len(sum_indices) > 6: + raise NotImplementedError("Too many indices for sum factorisation!") + + # Form groups by free indices + groups = groupby(factors, key=lambda f: f.free_indices) + groups = [reduce(Product, terms) for _, terms in groups] + + # Sum factorisation + expression = None + best_flops = numpy.inf + + # Consider all orderings of contraction indices + for ordering in permutations(sum_indices): + terms = groups[:] + flops = 0 + # Apply contraction index by index + for sum_index in ordering: + # Select terms that need to be part of the contraction + contract = [t for t in terms if sum_index in t.free_indices] + deferred = [t for t in terms if sum_index not in t.free_indices] + + # Optimise associativity + product, flops_ = associate(Product, contract) + term = IndexSum(product, (sum_index,)) + flops += flops_ + numpy.prod([i.extent for i in product.free_indices], dtype=int) + + # Replace the contracted terms with the result of the + # contraction. + terms = deferred + [term] + + # If some contraction indices were independent, then we may + # still have several terms at this point. 
+ expr, flops_ = associate(Product, terms) + flops += flops_ + + if flops < best_flops: + expression = expr + best_flops = flops + + return expression + + +def make_sum(summands): + """Constructs an operation-minimal sum of GEM expressions.""" + groups = groupby(summands, key=lambda f: f.free_indices) + summands = [reduce(Sum, terms) for _, terms in groups] + result, flops = associate(Sum, summands) + return result + + +def make_product(factors, sum_indices=()): + """Constructs an operation-minimal (tensor) product of GEM expressions.""" + return sum_factorise(sum_indices, factors) + + +def make_rename_map(): + """Creates an rename map for reusing the same index renames.""" + return defaultdict(Index) + + +def make_renamer(rename_map): + r"""Creates a function for renaming indices when expanding products of + IndexSums, i.e. applying to following rule: + + (\sum_i a_i)*(\sum_i b_i) ===> \sum_{i,i'} a_i*b_{i'} + + :arg rename_map: An rename map for renaming indices the same way + as functions returned by other calls of this + function. + :returns: A function that takes an iterable of indices to rename, + and returns (renamed indices, applier), where applier is + a function that remap the free indices of GEM + expressions from the old to the new indices. + """ + def _renamer(rename_map, current_set, incoming): + renamed = [] + renames = [] + for i in incoming: + j = i + while j in current_set: + j = rename_map[j] + current_set.add(j) + renamed.append(j) + if i != j: + renames.append((i, j)) + + if renames: + def applier(expr): + pairs = [(i, j) for i, j in renames if i in expr.free_indices] + if pairs: + current, renamed = zip(*pairs) + return Indexed(ComponentTensor(expr, current), renamed) + else: + return expr + else: + applier = lambda expr: expr + + return tuple(renamed), applier + return partial(_renamer, rename_map, set()) + + +def traverse_product(expression, stop_at=None, rename_map=None): + """Traverses a product tree and collects factors, also descending into + tensor contractions (IndexSum). The nominators of divisions are + also broken up, but not the denominators. + + :arg expression: a GEM expression + :arg stop_at: Optional predicate on GEM expressions. If specified + and returns true for some subexpression, that + subexpression is not broken into further factors + even if it is a product-like expression. + :arg rename_map: an rename map for consistent index renaming + :returns: (sum_indices, terms) + - sum_indices: list of indices to sum over + - terms: list of product terms + """ + if rename_map is None: + rename_map = make_rename_map() + renamer = make_renamer(rename_map) + + sum_indices = [] + terms = [] + + stack = [expression] + while stack: + expr = stack.pop() + if stop_at is not None and stop_at(expr): + terms.append(expr) + elif isinstance(expr, IndexSum): + indices, applier = renamer(expr.multiindex) + sum_indices.extend(indices) + stack.extend(remove_componenttensors(map(applier, expr.children))) + elif isinstance(expr, Product): + stack.extend(reversed(expr.children)) + elif isinstance(expr, Division): + # Break up products in the dividend, but not in divisor. + dividend, divisor = expr.children + if dividend == one: + terms.append(expr) + else: + stack.append(Division(one, divisor)) + stack.append(dividend) + else: + terms.append(expr) + + return sum_indices, terms + + +def traverse_sum(expression, stop_at=None): + """Traverses a summation tree and collects summands. + + :arg expression: a GEM expression + :arg stop_at: Optional predicate on GEM expressions. 
If specified + and returns true for some subexpression, that + subexpression is not broken into further summands + even if it is an addition. + :returns: list of summand expressions + """ + stack = [expression] + result = [] + while stack: + expr = stack.pop() + if stop_at is not None and stop_at(expr): + result.append(expr) + elif isinstance(expr, Sum): + stack.extend(reversed(expr.children)) + else: + result.append(expr) + return result + + +def contraction(expression, ignore=None): + """Optimise the contractions of the tensor product at the root of + the expression, including: + + - IndexSum-Delta cancellation + - Sum factorisation + + :arg ignore: Optional set of indices to ignore when applying sum + factorisation (otherwise all summation indices will be + considered). Use this if your expression has many contraction + indices. + + This routine was designed with finite element coefficient + evaluation in mind. + """ + # Eliminate annoying ComponentTensors + expression, = remove_componenttensors([expression]) + + # Flatten product tree, eliminate deltas, sum factorise + def rebuild(expression): + sum_indices, factors = delta_elimination(*traverse_product(expression)) + factors = remove_componenttensors(factors) + if ignore is not None: + # TODO: This is a really blunt instrument and one might + # plausibly want the ignored indices to be contracted on + # the inside rather than the outside. + extra = tuple(i for i in sum_indices if i in ignore) + to_factor = tuple(i for i in sum_indices if i not in ignore) + return IndexSum(sum_factorise(to_factor, factors), extra) + else: + return sum_factorise(sum_indices, factors) + + # Sometimes the value shape is composed as a ListTensor, which + # could get in the way of decomposing factors. In particular, + # this is the case for H(div) and H(curl) conforming tensor + # product elements. So if ListTensors are used, they are pulled + # out to be outermost, so we can straightforwardly factorise each + # of its entries. 
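+    # Collect the free indices that are used to index into ListTensors
+    # anywhere in the expression.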
+ lt_fis = OrderedDict() # ListTensor free indices + for node in traversal((expression,)): + if isinstance(node, Indexed): + child, = node.children + if isinstance(child, ListTensor): + lt_fis.update(zip_longest(node.multiindex, ())) + lt_fis = tuple(index for index in lt_fis if index in expression.free_indices) + + if lt_fis: + # Rebuild each split component + tensor = ComponentTensor(expression, lt_fis) + entries = [Indexed(tensor, zeta) for zeta in numpy.ndindex(tensor.shape)] + entries = remove_componenttensors(entries) + return Indexed(ListTensor( + numpy.array(list(map(rebuild, entries))).reshape(tensor.shape) + ), lt_fis) + else: + # Rebuild whole expression at once + return rebuild(expression) + + +@singledispatch +def _replace_delta(node, self): + raise AssertionError("cannot handle type %s" % type(node)) + + +_replace_delta.register(Node)(reuse_if_untouched) + + +@_replace_delta.register(Delta) +def _replace_delta_delta(node, self): + i, j = node.i, node.j + + if isinstance(i, Index) or isinstance(j, Index): + if isinstance(i, Index) and isinstance(j, Index): + assert i.extent == j.extent + if isinstance(i, Index): + assert i.extent is not None + size = i.extent + if isinstance(j, Index): + assert j.extent is not None + size = j.extent + return Indexed(Identity(size), (i, j)) + else: + def expression(index): + if isinstance(index, int): + return Literal(index) + elif isinstance(index, VariableIndex): + return index.expression + else: + raise ValueError("Cannot convert running index to expression.") + e_i = expression(i) + e_j = expression(j) + return Conditional(Comparison("==", e_i, e_j), one, Zero()) + + +def replace_delta(expressions): + """Lowers all Deltas in a multi-root expression DAG.""" + mapper = Memoizer(_replace_delta) + return list(map(mapper, expressions)) + + +@singledispatch +def _unroll_indexsum(node, self): + """Unrolls IndexSums below a certain extent. + + :arg node: root of the expression + :arg self: function for recursive calls + """ + raise AssertionError("cannot handle type %s" % type(node)) + + +_unroll_indexsum.register(Node)(reuse_if_untouched) + + +@_unroll_indexsum.register(IndexSum) # noqa +def _(node, self): + unroll = tuple(filter(self.predicate, node.multiindex)) + if unroll: + # Unrolling + summand = self(node.children[0]) + shape = tuple(index.extent for index in unroll) + unrolled = reduce(Sum, + (Indexed(ComponentTensor(summand, unroll), alpha) + for alpha in numpy.ndindex(shape)), + Zero()) + return IndexSum(unrolled, tuple(index for index in node.multiindex + if index not in unroll)) + else: + return reuse_if_untouched(node, self) + + +def unroll_indexsum(expressions, predicate): + """Unrolls IndexSums below a specified extent. 
+ + :arg expressions: list of expression DAGs + :arg predicate: a predicate function on :py:class:`Index` objects + that tells whether to unroll a particular index + :returns: list of expression DAGs with some unrolled IndexSums + """ + mapper = Memoizer(_unroll_indexsum) + mapper.predicate = predicate + return list(map(mapper, expressions)) + + +def aggressive_unroll(expression): + """Aggressively unrolls all loop structures.""" + # Unroll expression shape + if expression.shape: + tensor = numpy.empty(expression.shape, dtype=object) + for alpha in numpy.ndindex(expression.shape): + tensor[alpha] = Indexed(expression, alpha) + expression, = remove_componenttensors((ListTensor(tensor),)) + + # Unroll summation + expression, = unroll_indexsum((expression,), predicate=lambda index: True) + expression, = remove_componenttensors((expression,)) + return expression diff --git a/gem/pprint.py b/gem/pprint.py new file mode 100644 index 000000000..9c2451235 --- /dev/null +++ b/gem/pprint.py @@ -0,0 +1,209 @@ +"""Pretty-printing GEM expressions.""" +from collections import defaultdict +import itertools + +from functools import singledispatch + +from gem import gem +from gem.node import collect_refcount, post_traversal + + +class Context(object): + def __init__(self): + expr_counter = itertools.count(1) + self.expr_name = defaultdict(lambda: "${}".format(next(expr_counter))) + index_counter = itertools.count(1) + self.index_name = defaultdict(lambda: "i_{}".format(next(index_counter))) + self.index_names = set() + + def force_expression(self, expr): + assert isinstance(expr, gem.Node) + return self.expr_name[expr] + + def expression(self, expr): + assert isinstance(expr, gem.Node) + return self.expr_name.get(expr) + + def index(self, index): + assert isinstance(index, gem.Index) + if index.name is None: + name = self.index_name[index] + elif index.name not in self.index_names: + name = index.name + self.index_name[index] = name + else: + name_ = index.name + for i in itertools.count(1): + name = "{}~{}".format(name_, i) + if name not in self.index_names: + break + self.index_names.add(name) + return name + + +global_context = Context() + + +def pprint(expression_dags, context=global_context): + refcount = collect_refcount(expression_dags) + + def force(node): + if isinstance(node, gem.Variable): + return False + if node.shape: + return True + if isinstance(node, (gem.Constant, gem.Indexed, gem.FlexiblyIndexed)): + return False + return refcount[node] > 1 + + for node in post_traversal(expression_dags): + if force(node): + context.force_expression(node) + + name = context.expression(node) + if name is not None: + print(make_decl(node, name, context), '=', to_str(node, context, top=True)) + + for i, root in enumerate(expression_dags): + name = "#%d" % (i + 1) + print(make_decl(root, name, context), '=', to_str(root, context)) + + +def make_decl(node, name, ctx): + result = name + if node.shape: + result += '[' + ','.join(map(repr, node.shape)) + ']' + if node.free_indices: + result += '{' + ','.join(map(ctx.index, node.free_indices)) + '}' + return result + + +def to_str(expr, ctx, prec=None, top=False): + if not top and ctx.expression(expr): + result = ctx.expression(expr) + if expr.free_indices: + result += '{' + ','.join(map(ctx.index, expr.free_indices)) + '}' + return result + else: + return _to_str(expr, ctx, prec=prec) + + +@singledispatch +def _to_str(node, ctx, prec): + raise AssertionError("GEM node expected") + + +@_to_str.register(gem.Node) +def _to_str_node(node, ctx, prec): + front_args = 
[repr(getattr(node, name)) for name in node.__front__] + back_args = [repr(getattr(node, name)) for name in node.__back__] + children = [to_str(child, ctx) for child in node.children] + return "%s(%s)" % (type(node).__name__, ", ".join(front_args + children + back_args)) + + +@_to_str.register(gem.Zero) +def _to_str_zero(node, ctx, prec): + assert not node.shape + return "%g" % node.value + + +@_to_str.register(gem.Literal) +def _to_str_literal(node, ctx, prec): + if node.shape: + return repr(node.array.tolist()) + else: + return "%g" % node.value + + +@_to_str.register(gem.Variable) +def _to_str_variable(node, ctx, prec): + return node.name + + +@_to_str.register(gem.ListTensor) +def _to_str_listtensor(node, ctx, prec): + def recurse_rank(array): + if len(array.shape) > 1: + return '[' + ', '.join(map(recurse_rank, array)) + ']' + else: + return '[' + ', '.join(to_str(item, ctx) for item in array) + ']' + + return recurse_rank(node.array) + + +@_to_str.register(gem.Indexed) +def _to_str_indexed(node, ctx, prec): + child, = node.children + result = to_str(child, ctx) + dimensions = [] + for index in node.multiindex: + if isinstance(index, gem.Index): + dimensions.append(ctx.index(index)) + elif isinstance(index, int): + dimensions.append(str(index)) + else: + dimensions.append(to_str(index.expression, ctx)) + result += '[' + ','.join(dimensions) + ']' + return result + + +@_to_str.register(gem.FlexiblyIndexed) +def _to_str_flexiblyindexed(node, ctx, prec): + child, = node.children + result = to_str(child, ctx) + dimensions = [] + for offset, idxs in node.dim2idxs: + parts = [] + if offset: + parts.append(str(offset)) + for index, stride in idxs: + index_name = ctx.index(index) + assert stride + if stride == 1: + parts.append(index_name) + else: + parts.append(index_name + "*" + str(stride)) + if parts: + dimensions.append(' + '.join(parts)) + else: + dimensions.append('0') + if dimensions: + result += '[' + ','.join(dimensions) + ']' + return result + + +@_to_str.register(gem.IndexSum) +def _to_str_indexsum(node, ctx, prec): + result = 'Sum_{' + ','.join(map(ctx.index, node.multiindex)) + '} ' + to_str(node.children[0], ctx, prec=2) + if prec is not None and prec > 2: + result = '({})'.format(result) + return result + + +@_to_str.register(gem.ComponentTensor) +def _to_str_componenttensor(node, ctx, prec): + return to_str(node.children[0], ctx) + '|' + ','.join(ctx.index(i) for i in node.multiindex) + + +@_to_str.register(gem.Sum) +def _to_str_sum(node, ctx, prec): + children = [to_str(child, ctx, prec=1) for child in node.children] + result = " + ".join(children) + if prec is not None and prec > 1: + result = "({})".format(result) + return result + + +@_to_str.register(gem.Product) +def _to_str_product(node, ctx, prec): + children = [to_str(child, ctx, prec=3) for child in node.children] + result = "*".join(children) + if prec is not None and prec > 3: + result = "({})".format(result) + return result + + +@_to_str.register(gem.MathFunction) +def _to_str_mathfunction(node, ctx, prec): + child, = node.children + return node.name + "(" + to_str(child, ctx) + ")" diff --git a/gem/refactorise.py b/gem/refactorise.py new file mode 100644 index 000000000..2ca6e4cc0 --- /dev/null +++ b/gem/refactorise.py @@ -0,0 +1,304 @@ +"""Data structures and algorithms for generic expansion and +refactorisation.""" + +from collections import Counter, OrderedDict, defaultdict, namedtuple +from functools import singledispatch +from itertools import product +from sys import intern + +from gem.node import 
Memoizer, traversal +from gem.gem import (Node, Conditional, Zero, Product, Sum, Indexed, + ListTensor, one, MathFunction) +from gem.optimise import (remove_componenttensors, sum_factorise, + traverse_product, traverse_sum, unroll_indexsum, + make_rename_map, make_renamer) + + +# Refactorisation labels + +ATOMIC = intern('atomic') +"""Label: the expression need not be broken up into smaller parts""" + +COMPOUND = intern('compound') +"""Label: the expression must be broken up into smaller parts""" + +OTHER = intern('other') +"""Label: the expression is irrelevant with regards to refactorisation""" + + +Monomial = namedtuple('Monomial', ['sum_indices', 'atomics', 'rest']) +"""Monomial type, representation of a tensor product with some +distinguished factors (called atomics). + +- sum_indices: indices to sum over +- atomics: tuple of expressions classified as ATOMIC +- rest: a single expression classified as OTHER + +A :py:class:`Monomial` is a structured description of the expression: + +.. code-block:: python + + IndexSum(reduce(Product, atomics, rest), sum_indices) + +""" + + +class MonomialSum(object): + """Represents a sum of :py:class:`Monomial`s. + + The set of :py:class:`Monomial` summands are represented as a + mapping from a pair of unordered ``sum_indices`` and unordered + ``atomics`` to a ``rest`` GEM expression. This representation + makes it easier to merge similar monomials. + """ + def __init__(self): + # (unordered sum_indices, unordered atomics) -> rest + self.monomials = defaultdict(Zero) + + # We shall retain ordering for deterministic code generation: + # + # (unordered sum_indices, unordered atomics) -> + # (ordered sum_indices, ordered atomics) + self.ordering = OrderedDict() + + def __len__(self): + return len(self.ordering) + + def add(self, sum_indices, atomics, rest): + """Updates the :py:class:`MonomialSum` adding a new monomial.""" + sum_indices = tuple(sum_indices) + sum_indices_set = frozenset(sum_indices) + # Sum indices cannot have duplicates + assert len(sum_indices) == len(sum_indices_set) + + atomics = tuple(atomics) + atomics_set = frozenset(Counter(atomics).items()) + + assert isinstance(rest, Node) + + key = (sum_indices_set, atomics_set) + self.monomials[key] = Sum(self.monomials[key], rest) + self.ordering.setdefault(key, (sum_indices, atomics)) + + def __iter__(self): + """Iteration yields :py:class:`Monomial` objects""" + for key, (sum_indices, atomics) in self.ordering.items(): + rest = self.monomials[key] + yield Monomial(sum_indices, atomics, rest) + + @staticmethod + def sum(*args): + """Sum of multiple :py:class:`MonomialSum`s""" + result = MonomialSum() + for arg in args: + assert isinstance(arg, MonomialSum) + # Optimised implementation: no need to decompose and + # reconstruct key. 
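        # Sketch of the merge (hypothetical monomials): if two arguments both
        # contain a monomial with the same (sum_indices, atomics) key, say
        #     sum_{i} A[i]*B[i] * r1    and    sum_{i} A[i]*B[i] * r2,
        # the result keeps a single entry whose rest is Sum(r1, r2), because
        # the loop below combines rests under the shared key.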
+ for key, rest in arg.monomials.items(): + result.monomials[key] = Sum(result.monomials[key], rest) + for key, value in arg.ordering.items(): + result.ordering.setdefault(key, value) + return result + + @staticmethod + def product(*args, **kwargs): + """Product of multiple :py:class:`MonomialSum`s""" + rename_map = kwargs.pop('rename_map', None) + if rename_map is None: + rename_map = make_rename_map() + if kwargs: + raise ValueError("Unrecognised keyword argument: " + kwargs.pop()) + + result = MonomialSum() + for monomials in product(*args): + renamer = make_renamer(rename_map) + sum_indices = [] + atomics = [] + rest = one + for s, a, r in monomials: + s_, applier = renamer(s) + sum_indices.extend(s_) + atomics.extend(map(applier, a)) + rest = Product(applier(r), rest) + result.add(sum_indices, atomics, rest) + return result + + +class FactorisationError(Exception): + """Raised when factorisation fails to achieve some desired form.""" + pass + + +@singledispatch +def _collect_monomials(expression, self): + """Refactorises an expression into a sum-of-products form, using + distributivity rules (i.e. a*(b + c) -> a*b + a*c). Expansion + proceeds until all "compound" expressions are broken up. + + :arg expression: a GEM expression to refactorise + :arg self: function for recursive calls + + :returns: :py:class:`MonomialSum` + + :raises FactorisationError: Failed to break up some "compound" + expressions with expansion. + """ + # Phase 1: Collect and categorise product terms + def stop_at(expr): + # Break up compounds only + return self.classifier(expr) != COMPOUND + common_indices, terms = traverse_product(expression, stop_at=stop_at) + common_indices = tuple(common_indices) + + common_atomics = [] + common_others = [] + compounds = [] + for term in terms: + label = self.classifier(term) + if label == ATOMIC: + common_atomics.append(term) + elif label == COMPOUND: + compounds.append(term) + elif label == OTHER: + common_others.append(term) + else: + raise ValueError("Classifier returned illegal value.") + common_atomics = tuple(common_atomics) + + # Phase 2: Attempt to break up compound terms into summands + sums = [] + for expr in compounds: + summands = traverse_sum(expr, stop_at=stop_at) + if len(summands) <= 1 and not isinstance(expr, (Conditional, MathFunction)): + # Compound term is not an addition, avoid infinite + # recursion and fail gracefully raising an exception. + raise FactorisationError(expr) + # Recurse into each summand, concatenate their results + sums.append(MonomialSum.sum(*map(self, summands))) + + # Phase 3: Expansion + # + # Each element of ``sums`` is a MonomialSum. Expansion produces a + # series (representing a sum) of products of monomials. 
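    # Sketch of the expansion (hypothetical atomic factors): a product with
    # two compound factors (a1 + a2) and (b1 + b2) yields the MonomialSums
    # {a1, a2} and {b1, b2}; MonomialSum.product then enumerates the four
    # combinations a1*b1, a1*b2, a2*b1, a2*b2, and the loop below attaches
    # the common atomics, common sum indices and "other" factors to each.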
+ result = MonomialSum() + for s, a, r in MonomialSum.product(*sums, rename_map=self.rename_map): + renamer = make_renamer(self.rename_map) + renamer(common_indices) # update current_set + s_, applier = renamer(s) + + all_indices = common_indices + s_ + atomics = common_atomics + tuple(map(applier, a)) + + # All free indices that appear in atomic terms + atomic_indices = set().union(*[atomic.free_indices + for atomic in atomics]) + + # Sum indices that appear in atomic terms + # (will go to the result :py:class:`Monomial`) + sum_indices = tuple(index for index in all_indices + if index in atomic_indices) + + # Sum indices that do not appear in atomic terms + # (can factorise them over atomic terms immediately) + rest_indices = tuple(index for index in all_indices + if index not in atomic_indices) + + # Not really sum factorisation, but rather just an optimised + # way of building a product. + rest = sum_factorise(rest_indices, common_others + [applier(r)]) + + result.add(sum_indices, atomics, rest) + return result + + +@_collect_monomials.register(MathFunction) +def _collect_monomials_mathfunction(expression, self): + name = expression.name + if name in {"conj", "real", "imag"}: + # These are allowed to be applied to arguments, and hence must + # be dealt with specially. Just push the function onto each + # entry in the monomialsum of the child. + # NOTE: This presently assumes that the "atomics" part of a + # MonomialSum are real. This is true for the coffee, tensor, + # spectral modes: the atomics are indexed tabulation matrices + # (which are guaranteed real). + # If the classifier puts (potentially) complex expressions in + # atomics, then this code needs fixed. + child_ms, = map(self, expression.children) + result = MonomialSum() + for k, v in child_ms.monomials.items(): + result.monomials[k] = MathFunction(name, v) + result.ordering = child_ms.ordering.copy() + return result + else: + return _collect_monomials.dispatch(MathFunction.mro()[1])(expression, self) + + +@_collect_monomials.register(Conditional) +def _collect_monomials_conditional(expression, self): + """Refactorises a conditional expression into a sum-of-products form, + pulling only "atomics" out of conditional expressions. + + :arg expression: a GEM expression to refactorise + :arg self: function for recursive calls + + :returns: :py:class:`MonomialSum` + """ + condition, then, else_ = expression.children + # Recursively refactorise both branches to `MonomialSum`s + then_ms = self(then) + else_ms = self(else_) + + result = MonomialSum() + # For each set of atomics, create a new Conditional node. Atomics + # are considered safe to be pulled out of conditionals, but other + # expressions remain inside conditional branches. + zero = Zero() + for k in then_ms.monomials.keys() | else_ms.monomials.keys(): + _then = then_ms.monomials.get(k, zero) + _else = else_ms.monomials.get(k, zero) + result.monomials[k] = Conditional(condition, _then, _else) + + # Construct a deterministic ordering + result.ordering = then_ms.ordering.copy() + for k, v in else_ms.ordering.items(): + result.ordering.setdefault(k, v) + return result + + +def collect_monomials(expressions, classifier): + """Refactorises expressions into a sum-of-products form, using + distributivity rules (i.e. a*(b + c) -> a*b + a*c). Expansion + proceeds until all "compound" expressions are broken up. + + :arg expressions: GEM expressions to refactorise + :arg classifier: a function that can classify any GEM expression + as ``ATOMIC``, ``COMPOUND``, or ``OTHER``. 
This + classification drives the factorisation. + + :returns: list of :py:class:`MonomialSum`s + + :raises FactorisationError: Failed to break up some "compound" + expressions with expansion. + """ + # Get ComponentTensors out of the way + expressions = remove_componenttensors(expressions) + + # Get ListTensors out of the way + must_unroll = [] # indices to unroll + for node in traversal(expressions): + if isinstance(node, Indexed): + child, = node.children + if isinstance(child, ListTensor) and classifier(node) == COMPOUND: + must_unroll.extend(node.multiindex) + if must_unroll: + must_unroll = set(must_unroll) + expressions = unroll_indexsum(expressions, + predicate=lambda i: i in must_unroll) + expressions = remove_componenttensors(expressions) + + # Finally, refactorise expressions + mapper = Memoizer(_collect_monomials) + mapper.classifier = classifier + mapper.rename_map = make_rename_map() + return list(map(mapper, expressions)) diff --git a/gem/scheduling.py b/gem/scheduling.py new file mode 100644 index 000000000..41039a0e6 --- /dev/null +++ b/gem/scheduling.py @@ -0,0 +1,202 @@ +"""Schedules operations to evaluate a multi-root expression DAG, +forming an ordered list of Impero terminals.""" + +import collections +import functools +import itertools + +from gem import gem, impero +from gem.node import collect_refcount + + +class OrderedDefaultDict(collections.OrderedDict): + """A dictionary that provides a default value and ordered iteration. + + :arg factory: The callable used to create the default value. + + See :class:`collections.OrderedDict` for description of the + remaining arguments. + """ + def __init__(self, factory, *args, **kwargs): + self.factory = factory + super(OrderedDefaultDict, self).__init__(*args, **kwargs) + + def __missing__(self, key): + val = self[key] = self.factory() + return val + + +class ReferenceStager(object): + """Provides staging for nodes in reference counted expression + DAGs. A callback function is called once the reference count is + exhausted.""" + + def __init__(self, reference_count, callback): + """Initialises a ReferenceStager. + + :arg reference_count: initial reference counts for all + expected nodes + :arg callback: function to call on each node when + reference count is exhausted + """ + self.waiting = reference_count.copy() + self.callback = callback + + def decref(self, o): + """Decreases the reference count of a node, and possibly + triggering a callback (when the reference count drops to + zero).""" + assert 1 <= self.waiting[o] + + self.waiting[o] -= 1 + if self.waiting[o] == 0: + self.callback(o) + + def empty(self): + """All reference counts exhausted?""" + return not any(self.waiting.values()) + + +class Queue(object): + """Special queue for operation scheduling. GEM / Impero nodes are + inserted when they are ready to be scheduled, i.e. any operation + which depends on the operation to be inserted must have been + scheduled already. This class implements a heuristic for ordering + operations within the constraints in a way which aims to achieve + maximum loop fusion to minimise the size of temporaries which need + to be introduced. + """ + def __init__(self, callback): + """Initialises a Queue. + + :arg callback: function called on each element "popped" from the queue + """ + # Must have deterministic iteration over the queue + self.queue = OrderedDefaultDict(list) + self.callback = callback + + def insert(self, indices, elem): + """Insert element into queue. 
+ + :arg indices: loop indices used by the scheduling heuristic + :arg elem: element to be scheduled + """ + self.queue[indices].append(elem) + + def process(self): + """Pops elements from the queue and calls the callback + function on them until the queue is empty. The callback + function can insert further elements into the queue. + """ + indices = () + while self.queue: + # Find innermost non-empty outer loop + while indices not in (i[:len(indices)] for i in self.queue.keys()): + indices = indices[:-1] + + # Pick a loop + for i in self.queue.keys(): + if i[:len(indices)] == indices: + indices = i + break + + while self.queue[indices]: + self.callback(self.queue[indices].pop()) + del self.queue[indices] + + +def handle(ops, push, decref, node): + """Helper function for scheduling""" + if isinstance(node, gem.Variable): + # Declared in the kernel header + pass + elif isinstance(node, gem.Constant): + # Constant literals inlined, unless tensor-valued + if node.shape: + ops.append(impero.Evaluate(node)) + elif isinstance(node, gem.Zero): # should rarely happen + assert not node.shape + elif isinstance(node, (gem.Indexed, gem.FlexiblyIndexed)): + if node.indirect_children: + # Do not inline; + # Index expression can be involved if it contains VariableIndex. + ops.append(impero.Evaluate(node)) + for child in itertools.chain(node.children, node.indirect_children): + decref(child) + elif isinstance(node, gem.IndexSum): + ops.append(impero.Noop(node)) + push(impero.Accumulate(node)) + elif isinstance(node, gem.Node): + ops.append(impero.Evaluate(node)) + for child in node.children: + decref(child) + elif isinstance(node, impero.Initialise): + ops.append(node) + elif isinstance(node, impero.Accumulate): + ops.append(node) + push(impero.Initialise(node.indexsum)) + decref(node.indexsum.children[0]) + elif isinstance(node, impero.Return): + ops.append(node) + decref(node.expression) + elif isinstance(node, impero.ReturnAccumulate): + ops.append(node) + decref(node.indexsum.children[0]) + else: + raise AssertionError("no handler for node type %s" % type(node)) + + +def emit_operations(assignments, get_indices, emit_return_accumulate=True): + """Makes an ordering of operations to evaluate a multi-root + expression DAG. + + :arg assignments: Iterable of (variable, expression) pairs. + The value of expression is written into variable + upon execution. + :arg get_indices: mapping from GEM nodes to an ordering of free + indices + :arg emit_return_accumulate: emit ReturnAccumulate nodes? Set to + False if the output variables are not guaranteed + zero on entry to the kernel. 
+    :returns: list of Impero terminals correctly ordered to evaluate
+              the assignments
+    """
+    # Prepare reference counts
+    refcount = collect_refcount([e for v, e in assignments])
+
+    # Stage return operations
+    staging = []
+    for variable, expression in assignments:
+        if emit_return_accumulate and \
+                refcount[expression] == 1 and isinstance(expression, gem.IndexSum) \
+                and set(variable.free_indices) == set(expression.free_indices):
+            staging.append(impero.ReturnAccumulate(variable, expression))
+            refcount[expression] -= 1
+        else:
+            staging.append(impero.Return(variable, expression))
+
+    # Prepare data structures
+    def push_node(node):
+        queue.insert(get_indices(node), node)
+
+    def push_op(op):
+        queue.insert(op.loop_shape(get_indices), op)
+
+    ops = []
+
+    stager = ReferenceStager(refcount, push_node)
+    queue = Queue(functools.partial(handle, ops, push_op, stager.decref))
+
+    # Enqueue return operations
+    for op in staging:
+        push_op(op)
+
+    # Schedule operations
+    queue.process()
+
+    # Assert that nothing is left unprocessed
+    assert stager.empty()
+
+    # Return
+    ops.reverse()
+    return ops
diff --git a/gem/unconcatenate.py b/gem/unconcatenate.py
new file mode 100644
index 000000000..ce6e30b3c
--- /dev/null
+++ b/gem/unconcatenate.py
@@ -0,0 +1,270 @@
+"""Utility functions for decomposing Concatenate nodes.
+
+The exported functions are flatten and unconcatenate.
+- flatten: destroys the structure preserved within Concatenate nodes,
+           essentially reducing FInAT-provided tabulations to what
+           FIAT could have provided, so old code can continue to work.
+- unconcatenate: splits up (variable, expression) pairs along
+                 Concatenate nodes, thus recovering the structure
+                 within them, yet eliminating the Concatenate nodes.
+
+Let us see an example of unconcatenate.  Consider the form
+
+    div(v) * dx
+
+where v is an RTCF7 test function.  This means that the assembled
+local vector has 8 * 7 + 7 * 8 = 112 entries.  So the compilation of
+the form starts with a single assignment pair [(v, e)].  v is now the
+indexed return variable, something equivalent to
+
+    Indexed(Variable('A', (112,)), (j,))
+
+where j is the basis function index of the argument.  e is just a GEM
+quadrature expression with j as its only free index.  This will
+contain the tabulation of the RTCF7 element, which will cause
+something like
+
+    C_j := Indexed(Concatenate(A, B), (j,))
+
+to appear as a subexpression in e.  unconcatenate splits e along C_j
+into e_1 and e_2 such that
+
+    e_1 := e /. C_j -> A_{ja1,ja2}, and
+    e_2 := e /. C_j -> B_{jb1,jb2}.
+
+The split indices ja1, ja2, jb1, and jb2 have extents 8, 7, 7, and 8
+respectively (see the RTCF7 element construction above).  So the
+result of unconcatenate will be the list of pairs
+
+    [(v_1, e_1), (v_2, e_2)]
+
+where v_1 is the first 56 entries of v, reshaped as an 8 x 7 matrix,
+indexed with (ja1, ja2), and similarly, v_2 is the second 56 entries
+of v, reshaped as a 7 x 8 matrix, indexed with (jb1, jb2).
+
+The unconcatenated form allows for sum factorisation of tensor product
+elements as usual.  This pair splitting is also applicable to
+coefficient evaluation: take the local basis function coefficients as
+the variable, the FInAT tabulation of the element as the expression,
+and apply "matrix-vector multiplication" for each pair after
+unconcatenation, and then add up the results.
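
A minimal usage sketch (hypothetical pairs; the exact expressions depend
on the element being compiled):

    pairs = [(v, e)]                    # single assignment pair, as above
    split_pairs = unconcatenate(pairs)  # -> [(v_1, e_1), (v_2, e_2)]
    for var, expr in split_pairs:
        ...                             # compile each pair separately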
+""" + +from functools import singledispatch +from itertools import chain + +import numpy + +from gem.node import Memoizer, reuse_if_untouched +from gem.gem import (ComponentTensor, Concatenate, FlexiblyIndexed, + Index, Indexed, Literal, Node, partial_indexed, + reshape, view) +from gem.optimise import remove_componenttensors +from gem.interpreter import evaluate + + +__all__ = ['flatten', 'unconcatenate'] + + +def find_group(expressions): + """Finds a full set of indexed Concatenate nodes with the same + free index, if any such node exists. + + Pre-condition: ComponentTensor nodes surrounding Concatenate nodes + must be removed. + + :arg expressions: a multi-root GEM expression DAG + :returns: a list of GEM nodes, or None + """ + free_indices = set().union(chain(*[e.free_indices for e in expressions])) + + # Result variables + index = None + nodes = [] + + # Sui generis pre-order traversal so that we can avoid going + # unnecessarily deep in the DAG. + seen = set() + lifo = [] + for root in expressions: + if root not in seen: + seen.add(root) + lifo.append(root) + + while lifo: + node = lifo.pop() + if not free_indices.intersection(node.free_indices): + continue + + if isinstance(node, Indexed): + child, = node.children + if isinstance(child, Concatenate): + i, = node.multiindex + assert i in free_indices + if (index or i) == i: + index = i + nodes.append(node) + # Skip adding children + continue + + for child in reversed(node.children): + if child not in seen: + seen.add(child) + lifo.append(child) + + return index and nodes + + +def split_variable(variable_ref, index, multiindices): + """Splits a flexibly indexed variable along a concatenation index. + + :param variable_ref: flexibly indexed variable to split + :param index: :py:class:`Concatenate` index to split along + :param multiindices: one multiindex for each split variable + + :returns: generator of split indexed variables + """ + assert isinstance(variable_ref, FlexiblyIndexed) + other_indices = list(variable_ref.index_ordering()) + other_indices.remove(index) + other_indices = tuple(other_indices) + data = ComponentTensor(variable_ref, (index,) + other_indices) + slices = [slice(None)] * len(other_indices) + shapes = [(other_index.extent,) for other_index in other_indices] + + offset = 0 + for multiindex in multiindices: + shape = tuple(index.extent for index in multiindex) + size = numpy.prod(shape, dtype=int) + slice_ = slice(offset, offset + size) + offset += size + + sub_ref = Indexed(reshape(view(data, slice_, *slices), + shape, *shapes), + multiindex + other_indices) + sub_ref, = remove_componenttensors((sub_ref,)) + yield sub_ref + + +def _replace_node(node, self): + """Replace subexpressions using a given mapping. + + :param node: root of expression + :param self: function for recursive calls + """ + assert isinstance(node, Node) + if self.cut(node): + return node + try: + return self.mapping[node] + except KeyError: + return reuse_if_untouched(node, self) + + +def replace_node(expression, mapping, cut=None): + """Replace subexpressions using a given mapping. + + :param expression: a GEM expression + :param mapping: a :py:class:`dict` containing the substitutions + :param cut: cutting predicate; if returns true, it is assumed that + no replacements would take place in the subexpression. + """ + mapper = Memoizer(_replace_node) + mapper.mapping = mapping + mapper.cut = cut or (lambda node: False) + return mapper(expression) + + +def _unconcatenate(cache, pairs): + # Tail-call recursive core of unconcatenate. 
+ # Assumes that input has already been sanitised. + concat_group = find_group([e for v, e in pairs]) + if concat_group is None: + return pairs + + # Get the index split + concat_ref = next(iter(concat_group)) + assert isinstance(concat_ref, Indexed) + concat_expr, = concat_ref.children + index, = concat_ref.multiindex + assert isinstance(concat_expr, Concatenate) + try: + multiindices = cache[index] + except KeyError: + multiindices = tuple(tuple(Index(extent=d) for d in child.shape) + for child in concat_expr.children) + cache[index] = multiindices + + def cut(node): + """No need to rebuild expression of independent of the + relevant concatenation index.""" + return index not in node.free_indices + + # Build Concatenate node replacement mappings + mappings = [{} for i in range(len(multiindices))] + for concat_ref in concat_group: + concat_expr, = concat_ref.children + for i in range(len(multiindices)): + sub_ref = Indexed(concat_expr.children[i], multiindices[i]) + sub_ref, = remove_componenttensors((sub_ref,)) + mappings[i][concat_ref] = sub_ref + + # Finally, split assignment pairs + split_pairs = [] + for var, expr in pairs: + if index not in var.free_indices: + split_pairs.append((var, expr)) + else: + for v, m in zip(split_variable(var, index, multiindices), mappings): + split_pairs.append((v, replace_node(expr, m, cut))) + + # Run again, there may be other Concatenate groups + return _unconcatenate(cache, split_pairs) + + +def unconcatenate(pairs, cache=None): + """Splits a list of (indexed variable, expression) pairs along + :py:class:`Concatenate` nodes embedded in the expressions. + + :param pairs: list of (indexed variable, expression) pairs + :param cache: index splitting cache :py:class:`dict` (optional) + + :returns: list of (indexed variable, expression) pairs + """ + # Set up cache + if cache is None: + cache = {} + + # Eliminate index renaming due to ComponentTensor nodes + exprs = remove_componenttensors([e for v, e in pairs]) + pairs = [(v, e) for (v, _), e in zip(pairs, exprs)] + + return _unconcatenate(cache, pairs) + + +@singledispatch +def _flatten(node, self): + """Replace Concatenate nodes with Literal nodes. + + :arg node: root of the expression + :arg self: function for recursive calls + """ + raise AssertionError("cannot handle type %s" % type(node)) + + +_flatten.register(Node)(reuse_if_untouched) + + +@_flatten.register(Concatenate) +def _flatten_concatenate(node, self): + result, = evaluate([node]) + return partial_indexed(Literal(result.arr), result.fids) + + +def flatten(expressions): + """Flatten Concatenate nodes, and destroy the structure they express. + + :arg expressions: a multi-root expression DAG + """ + mapper = Memoizer(_flatten) + return list(map(mapper, expressions)) diff --git a/gem/utils.py b/gem/utils.py new file mode 100644 index 000000000..12e0e0f6c --- /dev/null +++ b/gem/utils.py @@ -0,0 +1,108 @@ +import collections + + +# This is copied from PyOP2, and it is here to be available for both +# FInAT and TSFC without depending on PyOP2. +class cached_property(object): + """A read-only @property that is only evaluated once. 
The value is cached + on the object itself rather than the function or class; this should prevent + memory leakage.""" + def __init__(self, fget, doc=None): + self.fget = fget + self.__doc__ = doc or fget.__doc__ + self.__name__ = fget.__name__ + self.__module__ = fget.__module__ + + def __get__(self, obj, cls): + if obj is None: + return self + obj.__dict__[self.__name__] = result = self.fget(obj) + return result + + +def groupby(iterable, key=None): + """Groups objects by their keys. + + :arg iterable: an iterable + :arg key: key function + + :returns: list of (group key, list of group members) pairs + """ + if key is None: + key = lambda x: x + groups = collections.OrderedDict() + for elem in iterable: + groups.setdefault(key(elem), []).append(elem) + return groups.items() + + +def make_proxy_class(name, cls): + """Constructs a proxy class for a given class. + + :arg name: name of the new proxy class + :arg cls: the wrapee class to create a proxy for + """ + def __init__(self, wrapee): + self._wrapee = wrapee + + def make_proxy_property(name): + def getter(self): + return getattr(self._wrapee, name) + return property(getter) + + dct = {'__init__': __init__} + for attr in dir(cls): + if not attr.startswith('_'): + dct[attr] = make_proxy_property(attr) + return type(name, (), dct) + + +# Implementation of dynamically scoped variables in Python. +class UnsetVariableError(LookupError): + pass + + +_unset = object() + + +class DynamicallyScoped(object): + """A dynamically scoped variable.""" + + def __init__(self, default_value=_unset): + if default_value is _unset: + self._head = None + else: + self._head = (default_value, None) + + def let(self, value): + return _LetBlock(self, value) + + @property + def value(self): + if self._head is None: + raise UnsetVariableError("Dynamically scoped variable not set.") + result, tail = self._head + return result + + +class _LetBlock(object): + """Context manager representing a dynamic scope.""" + + def __init__(self, variable, value): + self.variable = variable + self.value = value + self.state = None + + def __enter__(self): + assert self.state is None + value = self.value + tail = self.variable._head + scope = (value, tail) + self.variable._head = scope + self.state = scope + + def __exit__(self, exc_type, exc_value, traceback): + variable = self.variable + assert self.state is variable._head + value, variable._head = variable._head + self.state = None diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000..6e10c7b8a --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,39 @@ +[build-system] +requires = ["setuptools"] +build-backend = "setuptools.build_meta" + +[project] +name = "fenics-fiat" +version = "2024.0.0" +dependencies = [ + "numpy>=1.16", + "recursivenodes", + "scipy", + "symengine", + "sympy", + "fenics-ufl @ git+https://github.com/firedrakeproject/ufl.git", +] +requires-python = ">=3.10" +authors = [ + {name = "Robert C. 
Kirby et al.", email = "fenics-dev@googlegroups.com"}, + {name = "Imperial College London and others", email = "david.ham@imperial.ac.uk"}, +] +description = "FInite element Automatic Tabulator" +readme = "README.rst" +classifiers = [ + "Programming Language :: Python", + "License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)", +] + +[project.urls] +Repository = "https://github.com/firedrakeproject/fiat.git" + +[project.optional-dependencies] +doc = [ + "setuptools", # for pkg_resources + "sphinx", +] +test = ["pytest"] + +[tool.setuptools] +packages = ["FIAT", "finat", "finat.ufl", "gem"] diff --git a/setup.cfg b/setup.cfg index fdc13e4fb..d5f41d15c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,8 +1,8 @@ [flake8] -ignore = E501,E226,E731,W504, +ignore = E501,E226,E731,W503,W504, # ambiguous variable name E741 -exclude = .git,__pycache__,doc/sphinx/source/conf.py,build,dist +exclude = .git,__pycache__,docs/source/conf.py,build,dist min-version = 3.0 [pydocstyle] diff --git a/setup.py b/setup.py deleted file mode 100755 index d55401a90..000000000 --- a/setup.py +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env python - -import sys - -try: - from setuptools import setup -except ImportError: - from distutils.core import setup - -if sys.version_info < (3, 0): - print("Python 3.0 or higher required, please upgrade.") - sys.exit(1) - -version = "2019.2.0.dev0" - -url = "https://bitbucket.org/fenics-project/fiat/" -tarball = None -if 'dev' not in version: - tarball = url + "downloads/fenics-fiat-%s.tar.gz" % version - -setup( - name="fenics-fiat", - description="FInite element Automatic Tabulator", - version=version, - author="Robert C. Kirby et al.", - author_email="fenics-dev@googlegroups.com", - url=url, - download_url=tarball, - license="LGPL v3 or later", - packages=["FIAT"], - install_requires=[ - "setuptools", "numpy", "recursivenodes", "scipy", "sympy" - ] -) diff --git a/test/regression/.gitignore b/test/FIAT/regression/.gitignore similarity index 100% rename from test/regression/.gitignore rename to test/FIAT/regression/.gitignore diff --git a/test/regression/README.rst b/test/FIAT/regression/README.rst similarity index 100% rename from test/regression/README.rst rename to test/FIAT/regression/README.rst diff --git a/test/regression/conftest.py b/test/FIAT/regression/conftest.py similarity index 100% rename from test/regression/conftest.py rename to test/FIAT/regression/conftest.py diff --git a/test/regression/fiat-reference-data-id b/test/FIAT/regression/fiat-reference-data-id similarity index 100% rename from test/regression/fiat-reference-data-id rename to test/FIAT/regression/fiat-reference-data-id diff --git a/test/regression/scripts/download b/test/FIAT/regression/scripts/download similarity index 100% rename from test/regression/scripts/download rename to test/FIAT/regression/scripts/download diff --git a/test/regression/scripts/getdata b/test/FIAT/regression/scripts/getdata similarity index 100% rename from test/regression/scripts/getdata rename to test/FIAT/regression/scripts/getdata diff --git a/test/regression/scripts/getreferencerepo b/test/FIAT/regression/scripts/getreferencerepo similarity index 100% rename from test/regression/scripts/getreferencerepo rename to test/FIAT/regression/scripts/getreferencerepo diff --git a/test/regression/scripts/parameters b/test/FIAT/regression/scripts/parameters similarity index 100% rename from test/regression/scripts/parameters rename to test/FIAT/regression/scripts/parameters diff --git 
a/test/regression/scripts/upload b/test/FIAT/regression/scripts/upload similarity index 100% rename from test/regression/scripts/upload rename to test/FIAT/regression/scripts/upload diff --git a/test/regression/test_regression.py b/test/FIAT/regression/test_regression.py similarity index 100% rename from test/regression/test_regression.py rename to test/FIAT/regression/test_regression.py diff --git a/test/unit/test_argyris.py b/test/FIAT/unit/test_argyris.py similarity index 100% rename from test/unit/test_argyris.py rename to test/FIAT/unit/test_argyris.py diff --git a/test/unit/test_awc.py b/test/FIAT/unit/test_awc.py similarity index 100% rename from test/unit/test_awc.py rename to test/FIAT/unit/test_awc.py diff --git a/test/unit/test_awnc.py b/test/FIAT/unit/test_awnc.py similarity index 100% rename from test/unit/test_awnc.py rename to test/FIAT/unit/test_awnc.py diff --git a/test/unit/test_bernstein.py b/test/FIAT/unit/test_bernstein.py similarity index 100% rename from test/unit/test_bernstein.py rename to test/FIAT/unit/test_bernstein.py diff --git a/test/unit/test_discontinuous_pc.py b/test/FIAT/unit/test_discontinuous_pc.py similarity index 100% rename from test/unit/test_discontinuous_pc.py rename to test/FIAT/unit/test_discontinuous_pc.py diff --git a/test/unit/test_discontinuous_taylor.py b/test/FIAT/unit/test_discontinuous_taylor.py similarity index 100% rename from test/unit/test_discontinuous_taylor.py rename to test/FIAT/unit/test_discontinuous_taylor.py diff --git a/test/unit/test_facet_support_dofs.py b/test/FIAT/unit/test_facet_support_dofs.py similarity index 100% rename from test/unit/test_facet_support_dofs.py rename to test/FIAT/unit/test_facet_support_dofs.py diff --git a/test/unit/test_fdm.py b/test/FIAT/unit/test_fdm.py similarity index 100% rename from test/unit/test_fdm.py rename to test/FIAT/unit/test_fdm.py diff --git a/test/unit/test_fiat.py b/test/FIAT/unit/test_fiat.py similarity index 100% rename from test/unit/test_fiat.py rename to test/FIAT/unit/test_fiat.py diff --git a/test/unit/test_gauss_legendre.py b/test/FIAT/unit/test_gauss_legendre.py similarity index 100% rename from test/unit/test_gauss_legendre.py rename to test/FIAT/unit/test_gauss_legendre.py diff --git a/test/unit/test_gauss_lobatto_legendre.py b/test/FIAT/unit/test_gauss_lobatto_legendre.py similarity index 100% rename from test/unit/test_gauss_lobatto_legendre.py rename to test/FIAT/unit/test_gauss_lobatto_legendre.py diff --git a/test/unit/test_gauss_radau.py b/test/FIAT/unit/test_gauss_radau.py similarity index 100% rename from test/unit/test_gauss_radau.py rename to test/FIAT/unit/test_gauss_radau.py diff --git a/test/unit/test_gopalakrishnan_lederer_schoberl.py b/test/FIAT/unit/test_gopalakrishnan_lederer_schoberl.py similarity index 100% rename from test/unit/test_gopalakrishnan_lederer_schoberl.py rename to test/FIAT/unit/test_gopalakrishnan_lederer_schoberl.py diff --git a/test/unit/test_hct.py b/test/FIAT/unit/test_hct.py similarity index 100% rename from test/unit/test_hct.py rename to test/FIAT/unit/test_hct.py diff --git a/test/unit/test_hdivtrace.py b/test/FIAT/unit/test_hdivtrace.py similarity index 100% rename from test/unit/test_hdivtrace.py rename to test/FIAT/unit/test_hdivtrace.py diff --git a/test/unit/test_hierarchical.py b/test/FIAT/unit/test_hierarchical.py similarity index 100% rename from test/unit/test_hierarchical.py rename to test/FIAT/unit/test_hierarchical.py diff --git a/test/unit/test_johnson_mercier.py b/test/FIAT/unit/test_johnson_mercier.py similarity 
index 100% rename from test/unit/test_johnson_mercier.py rename to test/FIAT/unit/test_johnson_mercier.py diff --git a/test/unit/test_kong_mulder_veldhuizen.py b/test/FIAT/unit/test_kong_mulder_veldhuizen.py similarity index 100% rename from test/unit/test_kong_mulder_veldhuizen.py rename to test/FIAT/unit/test_kong_mulder_veldhuizen.py diff --git a/test/unit/test_macro.py b/test/FIAT/unit/test_macro.py similarity index 100% rename from test/unit/test_macro.py rename to test/FIAT/unit/test_macro.py diff --git a/test/unit/test_mtw.py b/test/FIAT/unit/test_mtw.py similarity index 100% rename from test/unit/test_mtw.py rename to test/FIAT/unit/test_mtw.py diff --git a/test/unit/test_orientation.py b/test/FIAT/unit/test_orientation.py similarity index 100% rename from test/unit/test_orientation.py rename to test/FIAT/unit/test_orientation.py diff --git a/test/unit/test_pointwise_dual.py b/test/FIAT/unit/test_pointwise_dual.py similarity index 100% rename from test/unit/test_pointwise_dual.py rename to test/FIAT/unit/test_pointwise_dual.py diff --git a/test/unit/test_polynomial.py b/test/FIAT/unit/test_polynomial.py similarity index 100% rename from test/unit/test_polynomial.py rename to test/FIAT/unit/test_polynomial.py diff --git a/test/unit/test_powell_sabin.py b/test/FIAT/unit/test_powell_sabin.py similarity index 100% rename from test/unit/test_powell_sabin.py rename to test/FIAT/unit/test_powell_sabin.py diff --git a/test/unit/test_quadrature.py b/test/FIAT/unit/test_quadrature.py similarity index 100% rename from test/unit/test_quadrature.py rename to test/FIAT/unit/test_quadrature.py diff --git a/test/unit/test_quadrature_element.py b/test/FIAT/unit/test_quadrature_element.py similarity index 100% rename from test/unit/test_quadrature_element.py rename to test/FIAT/unit/test_quadrature_element.py diff --git a/test/unit/test_reference_element.py b/test/FIAT/unit/test_reference_element.py similarity index 100% rename from test/unit/test_reference_element.py rename to test/FIAT/unit/test_reference_element.py diff --git a/test/unit/test_regge_hhj.py b/test/FIAT/unit/test_regge_hhj.py similarity index 100% rename from test/unit/test_regge_hhj.py rename to test/FIAT/unit/test_regge_hhj.py diff --git a/test/unit/test_serendipity.py b/test/FIAT/unit/test_serendipity.py similarity index 100% rename from test/unit/test_serendipity.py rename to test/FIAT/unit/test_serendipity.py diff --git a/test/unit/test_stokes_complex.py b/test/FIAT/unit/test_stokes_complex.py similarity index 100% rename from test/unit/test_stokes_complex.py rename to test/FIAT/unit/test_stokes_complex.py diff --git a/test/unit/test_tensor_product.py b/test/FIAT/unit/test_tensor_product.py similarity index 100% rename from test/unit/test_tensor_product.py rename to test/FIAT/unit/test_tensor_product.py diff --git a/test/README b/test/README deleted file mode 100644 index 6a6f779f4..000000000 --- a/test/README +++ /dev/null @@ -1,6 +0,0 @@ -Run tests by:: - - py.test [--skip-download] - py.test [--skip-download] regression/ - py.test unit/ - py.test unit/foo.py diff --git a/test/finat/fiat_mapping.py b/test/finat/fiat_mapping.py new file mode 100644 index 000000000..d857b2577 --- /dev/null +++ b/test/finat/fiat_mapping.py @@ -0,0 +1,72 @@ +import FIAT +import gem +import numpy as np +from finat.physically_mapped import PhysicalGeometry + + +class MyMapping(PhysicalGeometry): + def __init__(self, ref_cell, phys_cell): + self.ref_cell = ref_cell + self.phys_cell = phys_cell + + self.A, self.b = 
FIAT.reference_element.make_affine_mapping( + self.ref_cell.vertices, + self.phys_cell.vertices) + + def cell_size(self): + # Firedrake interprets this as 2x the circumradius + # cs = (np.prod([self.phys_cell.volume_of_subcomplex(1, i) + # for i in range(3)]) + # / 2.0 / self.phys_cell.volume()) + # return np.asarray([cs for _ in range(3)]) + # Currently, just return 1 so we can compare FIAT dofs + # to transformed dofs. + + return np.ones((3,)) + + def detJ_at(self, point): + return gem.Literal(np.linalg.det(self.A)) + + def jacobian_at(self, point): + return gem.Literal(self.A) + + def normalized_reference_edge_tangents(self): + return gem.Literal(np.asarray([self.ref_cell.compute_normalized_edge_tangent(i) for i in range(3)])) + + def reference_normals(self): + return gem.Literal( + np.asarray([self.ref_cell.compute_normal(i) + for i in range(3)])) + + def physical_normals(self): + return gem.Literal( + np.asarray([self.phys_cell.compute_normal(i) + for i in range(3)])) + + def physical_tangents(self): + return gem.Literal( + np.asarray([self.phys_cell.compute_normalized_edge_tangent(i) + for i in range(3)])) + + def physical_edge_lengths(self): + return gem.Literal( + np.asarray([self.phys_cell.volume_of_subcomplex(1, i) + for i in range(3)])) + + def physical_points(self, ps, entity=None): + prefs = ps.points + A, b = self.A, self.b + return gem.Literal(np.asarray([A @ x + b for x in prefs])) + + def physical_vertices(self): + return gem.Literal(self.phys_cell.verts) + + +class FiredrakeMapping(MyMapping): + + def cell_size(self): + # Firedrake interprets this as 2x the circumradius + cs = (np.prod([self.phys_cell.volume_of_subcomplex(1, i) + for i in range(3)]) + / 2.0 / self.phys_cell.volume()) + return np.asarray([cs for _ in range(3)]) diff --git a/test/finat/test_direct_serendipity.py b/test/finat/test_direct_serendipity.py new file mode 100644 index 000000000..807edd1bf --- /dev/null +++ b/test/finat/test_direct_serendipity.py @@ -0,0 +1,83 @@ +import pytest +import FIAT +import finat +import gem +import numpy as np +from finat.physically_mapped import PhysicalGeometry + + +class MyMapping(PhysicalGeometry): + def __init__(self, cell, verts): + # cell is reference cell, verts is physical vertices + self.verts = np.asarray(verts) + self.cell = cell + + def cell_size(self): + raise NotImplementedError + + def jacobian_at(self, point): + raise NotImplementedError + + def detJ_at(self, point): + raise NotImplementedError + + def reference_normals(self): + raise NotImplementedError + + def physical_normals(self): + raise NotImplementedError + + def physical_tangents(self): + raise NotImplementedError + + def physical_edge_lengths(self): + raise NotImplementedError + + def physical_points(self, ps, entity=None): + assert entity is None + prefs = ps.points + pvs = self.verts + pps = np.zeros(prefs.shape, dtype=float) + for i in range(pps.shape[0]): + pps[i, :] = (pvs[0, :] * (1-prefs[i, 0]) * (1-prefs[i, 1]) + + pvs[1, :] * (1-prefs[i, 0]) * prefs[i, 1] + + pvs[2, :] * prefs[i, 0] * (1-prefs[i, 1]) + + pvs[3, :] * prefs[i, 0] * prefs[i, 1]) + return gem.Literal(pps) + + def physical_vertices(self): + return gem.Literal(self.verts) + + +def get_pts(cell, deg): + assert cell.shape == FIAT.reference_element.QUADRILATERAL + L = cell.construct_subelement(1) + vs = np.asarray(cell.vertices) + pts = [pt for pt in cell.vertices] + Lpts = FIAT.reference_element.make_lattice(L.vertices, deg, 1) + for e in cell.topology[1]: + Fmap = cell.get_entity_transform(1, e) + epts = [tuple(Fmap(pt)) for pt 
in Lpts]
+        pts.extend(epts)
+    if deg > 3:
+        dx0 = (vs[1, :] - vs[0, :]) / (deg-2)
+        dx1 = (vs[2, :] - vs[0, :]) / (deg-2)
+
+        internal_nodes = [tuple(vs[0, :] + dx0 * i + dx1 * j)
+                          for i in range(1, deg-2)
+                          for j in range(1, deg-1-i)]
+        pts.extend(internal_nodes)
+    return pts
+
+
+@pytest.mark.parametrize('degree', [1, 2, 3, 4])
+def test_kronecker(degree):
+    cell = FIAT.ufc_cell("quadrilateral")
+    element = finat.DirectSerendipity(cell, degree)
+    pts = finat.point_set.PointSet(get_pts(cell, degree))
+    vertices = np.asarray(((0.0, 0.0), (1.0, 0.0), (0.1, 1.1), (0.95, 1.01)))
+    mapping = MyMapping(cell, vertices)
+    z = tuple([0] * cell.get_spatial_dimension())
+    vals = element.basis_evaluation(0, pts, coordinate_mapping=mapping)[z]
+    numvals = gem.interpreter.evaluate([vals])[0].arr
+    assert np.allclose(numvals, np.eye(*numvals.shape))
diff --git a/test/finat/test_hash.py b/test/finat/test_hash.py
new file mode 100644
index 000000000..d74f0ecd4
--- /dev/null
+++ b/test/finat/test_hash.py
@@ -0,0 +1,53 @@
+import os
+import sys
+import subprocess
+import textwrap
+
+import ufl
+import finat.ufl
+
+
+def test_same_hash():
+    """ The same element created twice should have the same hash.
+    """
+    cg = finat.ufl.finiteelement.FiniteElement("Lagrange", ufl.cell.Cell("triangle"), 1)
+    same_cg = finat.ufl.finiteelement.FiniteElement("Lagrange", ufl.cell.Cell("triangle"), 1)
+    assert hash(cg) == hash(same_cg)
+
+
+def test_different_hash():
+    """ Two different elements should have different hashes.
+    """
+    cg = finat.ufl.finiteelement.FiniteElement("Lagrange", ufl.cell.Cell("triangle"), 1)
+    dg = finat.ufl.finiteelement.FiniteElement("DG", ufl.cell.Cell("triangle"), 2)
+    assert hash(cg) != hash(dg)
+
+
+def test_variant_hashes_different():
+    """ Different variants of the same element should have different hashes.
+    """
+    dg = finat.ufl.finiteelement.FiniteElement("DG", ufl.cell.Cell("triangle"), 2)
+    dg_gll = finat.ufl.finiteelement.FiniteElement("DG", ufl.cell.Cell("triangle"), 2, variant="gll")
+    assert hash(dg) != hash(dg_gll)
+
+
+def test_persistent_hash(tmp_path):
+    """ Hashes should be the same across Python invocations.
+    """
+    filename = "print_hash.py"
+    code = textwrap.dedent("""\
+        import ufl
+        import finat.ufl
+
+        dg = finat.ufl.finiteelement.FiniteElement("RT", ufl.cell.Cell("triangle"), 1)
+        print(hash(dg))
+    """)
+    filepath = tmp_path.joinpath(filename)
+    with open(filepath, "w") as fh:
+        fh.write(code)
+
+    output1 = subprocess.run([sys.executable, filepath], capture_output=True)
+    assert output1.returncode == os.EX_OK
+    output2 = subprocess.run([sys.executable, filepath], capture_output=True)
+    assert output2.returncode == os.EX_OK
+    assert output1.stdout == output2.stdout
diff --git a/test/finat/test_mass_conditioning.py b/test/finat/test_mass_conditioning.py
new file mode 100644
index 000000000..cc4eafcaa
--- /dev/null
+++ b/test/finat/test_mass_conditioning.py
@@ -0,0 +1,53 @@
+import FIAT
+import finat
+import numpy as np
+import pytest
+from gem.interpreter import evaluate
+
+from fiat_mapping import FiredrakeMapping
+
+
+@pytest.mark.parametrize("element, degree, variant", [
+    (finat.Hermite, 3, None),
+    (finat.QuadraticPowellSabin6, 2, None),
+    (finat.QuadraticPowellSabin12, 2, None),
+    (finat.ReducedHsiehCloughTocher, 3, None),
+    (finat.HsiehCloughTocher, 3, None),
+    (finat.HsiehCloughTocher, 4, None),
+    (finat.Bell, 5, None),
+    (finat.Argyris, 5, "point"),
+    (finat.Argyris, 5, None),
+    (finat.Argyris, 6, None),
+])
+def test_mass_scaling(element, degree, variant):
+    sd = 2
+    ref_cell = FIAT.ufc_simplex(sd)
+    if variant is not None:
+        ref_element = element(ref_cell, degree, variant=variant)
+    else:
+        ref_element = element(ref_cell, degree)
+
+    Q = finat.quadrature.make_quadrature(ref_cell, 2*degree)
+    qpts = Q.point_set
+    qwts = Q.weights
+
+    kappa = []
+    for k in range(3):
+        h = 2 ** -k
+        phys_cell = FIAT.ufc_simplex(2)
+        new_verts = h * np.array(phys_cell.get_vertices())
+        phys_cell.vertices = tuple(map(tuple, new_verts))
+        mapping = FiredrakeMapping(ref_cell, phys_cell)
+        J_gem = mapping.jacobian_at(ref_cell.make_points(sd, 0, sd+1)[0])
+        J = evaluate([J_gem])[0].arr
+
+        z = (0,) * ref_element.cell.get_spatial_dimension()
+        finat_vals_gem = ref_element.basis_evaluation(0, qpts, coordinate_mapping=mapping)[z]
+        phis = evaluate([finat_vals_gem])[0].arr.T
+
+        M = np.dot(np.multiply(phis, qwts * abs(np.linalg.det(J))), phis.T)
+        kappa.append(np.linalg.cond(M))
+
+    kappa = np.array(kappa)
+    ratio = kappa[1:] / kappa[:-1]
+    assert np.allclose(ratio, 1, atol=0.1)
diff --git a/test/finat/test_point_evaluation_ciarlet.py b/test/finat/test_point_evaluation_ciarlet.py
new file mode 100644
index 000000000..5e13d918b
--- /dev/null
+++ b/test/finat/test_point_evaluation_ciarlet.py
@@ -0,0 +1,31 @@
+import pytest
+
+import FIAT
+import finat
+import gem
+
+
+@pytest.fixture(params=[1, 2, 3])
+def cell(request):
+    dim = request.param
+    return FIAT.ufc_simplex(dim)
+
+
+@pytest.mark.parametrize('degree', [1, 2])
+def test_cellwise_constant(cell, degree):
+    dim = cell.get_spatial_dimension()
+    element = finat.Lagrange(cell, degree)
+    index = gem.Index()
+    point = gem.partial_indexed(gem.Variable('X', (17, dim)), (index,))
+
+    order = 2
+    for alpha, table in element.point_evaluation(order, point).items():
+        if sum(alpha) < degree:
+            assert table.free_indices == (index,)
+        else:
+            assert table.free_indices == ()
+
+
+if __name__ == '__main__':
+    import os
+    pytest.main([os.path.abspath(__file__)])
diff --git a/test/finat/test_restriction.py b/test/finat/test_restriction.py
new file mode 100644
index 000000000..443292a04
--- /dev/null
+++ b/test/finat/test_restriction.py
@@ -0,0 +1,144 @@
+import FIAT
+import finat
+import numpy
+import pytest
+from finat.point_set import PointSet
+from finat.restricted import r_to_codim
+from gem.interpreter import evaluate
+
+
+def tabulate(element, ps):
+    tabulation, = element.basis_evaluation(0, ps).values()
+    result, = evaluate([tabulation])
+    # Singleton point
+    shape = (int(numpy.prod(element.index_shape)), ) + element.value_shape
+    return result.arr.reshape(*shape)
+
+
+def which_dofs(element, restricted):
+    edofs = element.entity_dofs()
+    rdofs = restricted.entity_dofs()
+    keep_e = []
+    keep_r = []
+    for k in edofs.keys():
+        for e, indices in edofs[k].items():
+            if rdofs[k][e]:
+                assert len(rdofs[k][e]) == len(indices)
+                keep_e.extend(indices)
+                keep_r.extend(rdofs[k][e])
+    return keep_e, keep_r
+
+
+@pytest.fixture(params=["vertex", "edge", "facet", "interior"], scope="module")
+def restriction(request):
+    return request.param
+
+
+@pytest.fixture(params=["tet", "quad", "prism"], scope="module")
+def cell(request):
+    if request.param == "tet":
+        cell = (FIAT.ufc_simplex(3),)
+    elif request.param == "quad":
+        interval = FIAT.ufc_simplex(1)
+        cell = (interval, interval)
+    elif request.param == "prism":
+        triangle = FIAT.ufc_simplex(2)
+        interval = FIAT.ufc_simplex(1)
+        cell = (triangle, interval)
+    return cell
+
+
+@pytest.fixture
+def ps(cell):
+    dim = sum(e.get_spatial_dimension() for e in cell)
+    return PointSet([[1/3, 1/4, 1/5][:dim]])
+
+
+@pytest.fixture(scope="module")
+def scalar_element(cell):
+    if len(cell) == 1:
+        return finat.Lagrange(cell[0], 4)
+    else:
+        e1, e2 = cell
+        return finat.FlattenedDimensions(
+            finat.TensorProductElement([
+                finat.GaussLobattoLegendre(e1, 3),
+                finat.GaussLobattoLegendre(e2, 3)]
+            )
+        )
+
+
+@pytest.fixture(scope="module")
+def hdiv_element(cell):
+    if len(cell) == 1:
+        return finat.RaviartThomas(cell[0], 3, variant="integral(3)")
+    else:
+        e1, e2 = cell
+        element = finat.GaussLobattoLegendre if e1.get_spatial_dimension() == 1 else finat.RaviartThomas
+        return finat.FlattenedDimensions(
+            finat.EnrichedElement([
+                finat.HDivElement(
+                    finat.TensorProductElement([
+                        element(e1, 3),
+                        finat.GaussLegendre(e2, 3)])),
+                finat.HDivElement(
+                    finat.TensorProductElement([
+                        finat.GaussLegendre(e1, 3),
+                        finat.GaussLobattoLegendre(e2, 3)]))
+            ]))
+
+
+@pytest.fixture(scope="module")
+def hcurl_element(cell):
+    if len(cell) == 1:
+        return finat.Nedelec(cell[0], 3, variant="integral(3)")
+    else:
+        e1, e2 = cell
+        element = finat.GaussLegendre if e1.get_spatial_dimension() == 1 else finat.Nedelec
+        return finat.FlattenedDimensions(
+            finat.EnrichedElement([
+                finat.HCurlElement(
+                    finat.TensorProductElement([
+                        finat.GaussLobattoLegendre(e1, 3),
+                        finat.GaussLegendre(e2, 3)])),
+                finat.HCurlElement(
+                    finat.TensorProductElement([
+                        element(e1, 3),
+                        finat.GaussLobattoLegendre(e2, 3)]))
+            ]))
+
+
+def run_restriction(element, restriction, ps):
+    try:
+        restricted = finat.RestrictedElement(element, restriction)
+    except ValueError:
+        # No dofs.
+        # Check that the original element had no dofs in all the relevant slots.
+        dim = element.cell.get_spatial_dimension()
+        lo_codim = r_to_codim(restriction, dim)
+        hi_codim = (lo_codim if restriction == "interior" else dim)
+        edofs = element.entity_dofs()
+        for entity_dim, dof_numbering in edofs.items():
+            try:
+                entity_codim = dim - sum(entity_dim)
+            except TypeError:
+                entity_codim = dim - entity_dim
+            if lo_codim <= entity_codim <= hi_codim:
+                assert all(len(i) == 0 for i in dof_numbering.values())
+    else:
+        e = tabulate(element, ps)
+        r = tabulate(restricted, ps)
+        keep_e, keep_r = which_dofs(element, restricted)
+        assert numpy.allclose(e[keep_e, ...], r[keep_r, ...])
+
+
+def test_scalar_restriction(scalar_element, restriction, ps):
+    run_restriction(scalar_element, restriction, ps)
+
+
+def test_hdiv_restriction(hdiv_element, restriction, ps):
+    run_restriction(hdiv_element, restriction, ps)
+
+
+def test_hcurl_restriction(hcurl_element, restriction, ps):
+    run_restriction(hcurl_element, restriction, ps)
diff --git a/test/finat/test_zany_mapping.py b/test/finat/test_zany_mapping.py
new file mode 100644
index 000000000..9444f5e24
--- /dev/null
+++ b/test/finat/test_zany_mapping.py
@@ -0,0 +1,184 @@
+import FIAT
+import finat
+import numpy as np
+import pytest
+from gem.interpreter import evaluate
+
+from fiat_mapping import MyMapping
+
+
+def make_unisolvent_points(element, interior=False):
+    degree = element.degree()
+    ref_complex = element.get_reference_complex()
+    top = ref_complex.get_topology()
+    pts = []
+    if interior:
+        dim = ref_complex.get_spatial_dimension()
+        for entity in top[dim]:
+            pts.extend(ref_complex.make_points(dim, entity, degree+dim+1, variant="gll"))
+    else:
+        for dim in top:
+            for entity in top[dim]:
+                pts.extend(ref_complex.make_points(dim, entity, degree, variant="gll"))
+    return pts
+
+
+def check_zany_mapping(element, ref_cell, phys_cell, *args, **kwargs):
+    phys_element = element(phys_cell, *args, **kwargs).fiat_equivalent
+    finat_element = element(ref_cell, *args, **kwargs)
+
+    ref_element = finat_element._element
+    ref_cell = ref_element.get_reference_element()
+    phys_cell = phys_element.get_reference_element()
+    sd = ref_cell.get_spatial_dimension()
+
+    shape = ref_element.value_shape()
+    ref_pts = make_unisolvent_points(ref_element, interior=True)
+    ref_vals = ref_element.tabulate(0, ref_pts)[(0,)*sd]
+
+    phys_pts = make_unisolvent_points(phys_element, interior=True)
+    phys_vals = phys_element.tabulate(0, phys_pts)[(0,)*sd]
+
+    mapping = ref_element.mapping()[0]
+    if mapping == "affine":
+        ref_vals_piola = ref_vals
+    else:
+        # Piola map the reference elements
+        J, b = FIAT.reference_element.make_affine_mapping(ref_cell.vertices,
+                                                          phys_cell.vertices)
+        K = []
+        if "covariant" in mapping:
+            K.append(np.linalg.inv(J).T)
+        if "contravariant" in mapping:
+            K.append(J / np.linalg.det(J))
+
+        if len(shape) == 2:
+            piola_map = lambda x: K[0] @ x @ K[-1].T
+        else:
+            piola_map = lambda x: K[0] @ x
+
+        ref_vals_piola = np.zeros(ref_vals.shape)
+        for i in range(ref_vals.shape[0]):
+            for k in range(ref_vals.shape[-1]):
+                ref_vals_piola[i, ..., k] = piola_map(ref_vals[i, ..., k])
+
+    # Zany map the results
+    num_bfs = phys_element.space_dimension()
+    num_dofs = finat_element.space_dimension()
+    coord_mapping = MyMapping(ref_cell, phys_cell)
+    try:
+        Mgem = finat_element.basis_transformation(coord_mapping)
+        M = evaluate([Mgem])[0].arr
+        ref_vals_zany = np.tensordot(M, ref_vals_piola, (-1, 0))
+    except AttributeError:
+        M = np.eye(num_dofs, num_bfs)
+        ref_vals_zany = ref_vals_piola
+
+    # Solve for the basis transformation and compare results
+    Phi = ref_vals_piola.reshape(num_bfs, -1)
+    phi = phys_vals.reshape(num_bfs, -1)
+    Vh, residual, *_ = np.linalg.lstsq(Phi.T, phi.T)
+    Mh = Vh.T
+    Mh = Mh[:num_dofs]
+    Mh[abs(Mh) < 1E-10] = 0.0
+    M[abs(M) < 1E-10] = 0.0
+    assert np.allclose(residual, 0), str(M.T - Mh.T)
+    assert np.allclose(ref_vals_zany, phys_vals[:num_dofs])
+
+
+@pytest.fixture
+def ref_el(request):
+    K = {dim: FIAT.ufc_simplex(dim) for dim in (2, 3)}
+    return K
+
+
+@pytest.fixture
+def phys_el(request):
+    K = {dim: FIAT.ufc_simplex(dim) for dim in (2, 3)}
+    K[2].vertices = ((0.0, 0.1), (1.17, -0.09), (0.15, 1.84))
+    K[3].vertices = ((0, 0, 0),
+                     (1., 0.1, -0.37),
+                     (0.01, 0.987, -.23),
+                     (-0.1, -0.2, 1.38))
+    return K
+
+
+@pytest.mark.parametrize("element", [
+    finat.Morley,
+    finat.Hermite,
+    finat.Bell,
+    ])
+def test_C1_elements(ref_el, phys_el, element):
+    check_zany_mapping(element, ref_el[2], phys_el[2])
+
+
+@pytest.mark.parametrize("element", [
+    finat.QuadraticPowellSabin6,
+    finat.QuadraticPowellSabin12,
+    finat.ReducedHsiehCloughTocher,
+    ])
+def test_C1_macroelements(ref_el, phys_el, element):
+    kwargs = {}
+    if element == finat.QuadraticPowellSabin12:
+        kwargs = dict(avg=True)
+    check_zany_mapping(element, ref_el[2], phys_el[2], **kwargs)
+
+
+@pytest.mark.parametrize("element, degree", [
+    *((finat.Argyris, k) for k in range(5, 8)),
+    *((finat.HsiehCloughTocher, k) for k in range(3, 6))
+])
+def test_high_order_C1_elements(ref_el, phys_el, element, degree):
+    check_zany_mapping(element, ref_el[2], phys_el[2], degree, avg=True)
+
+
+def test_argyris_point(ref_el, phys_el):
+    check_zany_mapping(finat.Argyris, ref_el[2], phys_el[2], variant="point")
+
+
+zany_piola_elements = {
+    2: [
+        finat.MardalTaiWinther,
+        finat.ReducedArnoldQin,
+        finat.ArnoldWinther,
+        finat.ArnoldWintherNC,
+    ],
+    3: [
+        finat.BernardiRaugel,
+        finat.BernardiRaugelBubble,
+        finat.AlfeldSorokina,
+        finat.ChristiansenHu,
+        finat.JohnsonMercier,
+        finat.GuzmanNeilanFirstKindH1,
+        finat.GuzmanNeilanSecondKindH1,
+        finat.GuzmanNeilanBubble,
+        finat.GuzmanNeilanH1div,
+    ],
+}
+
+
+@pytest.mark.parametrize("dimension, element", [
+    *((2, e) for e in zany_piola_elements[2]),
+    *((2, e) for e in zany_piola_elements[3]),
+    *((3, e) for e in zany_piola_elements[3]),
+    ])
+def test_piola(ref_el, phys_el, element, dimension):
+    check_zany_mapping(element, ref_el[dimension], phys_el[dimension])
+
+
+@pytest.mark.parametrize("element, degree, variant", [
+    *((finat.HuZhang, k, v) for v in ("integral", "point") for k in range(3, 6)),
+])
+def test_piola_triangle_high_order(ref_el, phys_el, element, degree, variant):
+    check_zany_mapping(element, ref_el[2], phys_el[2], degree, variant)
+
+
+@pytest.mark.parametrize("element, degree", [
+    *((finat.Regge, k) for k in range(3)),
+    *((finat.HellanHerrmannJohnson, k) for k in range(3)),
+    *((finat.GopalakrishnanLedererSchoberlFirstKind, k) for k in range(1, 4)),
+    *((finat.GopalakrishnanLedererSchoberlSecondKind, k) for k in range(0, 3)),
+    ])
+@pytest.mark.parametrize("dimension", [2, 3])
+def test_affine(ref_el, phys_el, element, degree, dimension):
+    check_zany_mapping(element, ref_el[dimension], phys_el[dimension], degree)
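
The tests above all follow the same tabulation pattern: build a FInAT element on a FIAT reference cell, call basis_evaluation at a PointSet, and evaluate the resulting GEM expression with gem.interpreter.evaluate. The snippet below is a minimal sketch of that pattern, not part of the patch; it assumes FIAT, FInAT and gem are importable exactly as in the test files, and the partition-of-unity check at the end is only an illustrative assertion.

import FIAT
import finat
import numpy as np
from finat.point_set import PointSet
from gem.interpreter import evaluate

# Quadratic Lagrange on the UFC reference triangle; affine elements need no
# coordinate mapping, so basis_evaluation can be called without one, as in
# tabulate() in test_restriction.py.
cell = FIAT.ufc_simplex(2)
element = finat.Lagrange(cell, 2)
ps = PointSet([[1/3, 1/4]])  # a single interior point

# An order-0 tabulation has exactly one derivative multi-index, (0, 0).
tabulation, = element.basis_evaluation(0, ps).values()
result, = evaluate([tabulation])
phi = result.arr.reshape(-1)  # values of the six P2 basis functions

# Lagrange bases form a partition of unity, so the values sum to one.
assert np.isclose(phi.sum(), 1.0)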