diff --git a/.coveragerc b/.coveragerc
deleted file mode 100644
index fd25fbcd3..000000000
--- a/.coveragerc
+++ /dev/null
@@ -1,35 +0,0 @@
-# Don't worry about coverage for the grid GUI (for now)
-[run]
-omit =
- armi/cli/gridGui.py
- armi/utils/gridEditor.py
- armi/utils/tests/test_gridGui.py
- venv/
-source = armi
-# change default .coverage file to something that doesn't have a dot
-# because the Windows file server can't handle dots.
-data_file = coverage_results.cov
-
-[coverage:run]
-parallel = true
-
-[report]
-omit =
- armi/cli/gridGui.py
- armi/utils/gridEditor.py
- */tests/*
-
-exclude_lines =
- # Don't complain about missing debug-only code:
- def __repr__
- if self\.debug
-
- # Don't complain if tests don't hit defensive assertion code:
- raise AssertionError
- raise KeyboardInterrupt
- raise NotImplementedError
- except ImportError
- pass
-
- # Don't complain if non-runnable code isn't run:
- if __name__ == .__main__.:
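The settings above move into pyproject.toml (note the --rcfile=pyproject.toml flags in the updated coverage workflow below). A quick way to confirm the relocated settings are picked up, sketched with coverage.py's documented get_option API:

    # a minimal sketch: confirm coverage.py reads its config from pyproject.toml
    from coverage import Coverage

    cov = Coverage(config_file="pyproject.toml")
    print(cov.get_option("run:data_file"))  # e.g. "coverage_results.cov", if that setting was kept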
diff --git a/.github/workflows/black.yaml b/.github/workflows/black.yaml
index 5e8b698d6..352f6b569 100644
--- a/.github/workflows/black.yaml
+++ b/.github/workflows/black.yaml
@@ -2,17 +2,20 @@ name: black
on: [push, pull_request]
+permissions:
+ contents: read
+
# use workaround due to: https://github.com/psf/black/issues/2079#issuecomment-812359146
jobs:
- check-formatting:
- runs-on: ubuntu-22.04
- steps:
- - uses: actions/checkout@v2
- - name: Set up Python 3.11
- uses: actions/setup-python@v2
- with:
- python-version: '3.11'
- - name: Install Black
- run: pip install 'black==22.6.0'
- - name: Run black --check .
- run: black --check .
+ check-formatting:
+ runs-on: ubuntu-24.04
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python 3.11
+ uses: actions/setup-python@v2
+ with:
+ python-version: '3.11'
+ - name: Install Black
+ run: pip install 'black==22.6.0'
+ - name: Run black --check .
+ run: black --check .
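The pinned black==22.6.0 check is equivalent to asking black whether any file would be reformatted. A local approximation using black's Python API (format_str and Mode are public helpers):

    # a sketch of what `black --check` enforces, via black's Python API
    import black

    src = "x=1\n"
    formatted = black.format_str(src, mode=black.Mode())
    assert formatted == "x = 1\n"  # CI fails when any file differs from its black-formatted form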
diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml
index 0effcb038..6107ecf3c 100644
--- a/.github/workflows/coverage.yaml
+++ b/.github/workflows/coverage.yaml
@@ -1,5 +1,8 @@
name: Coverage
+permissions:
+ contents: read
+
on:
push:
branches:
@@ -10,9 +13,15 @@ on:
paths-ignore:
- 'doc/**'
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
jobs:
build:
- runs-on: ubuntu-22.04
+ # Deploying coverage to coveralls.io should not happen on forks
+ if: github.repository == 'terrapower/armi'
+ runs-on: ubuntu-24.04
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
COVERALLS_REPO_TOKEN: ${{ secrets.COVERALLS_REPO_TOKEN }}
@@ -24,16 +33,19 @@ jobs:
python-version: '3.11'
- name: Update package index
run: sudo apt-get update
- - name: Install mpi libs
- run: sudo apt-get -y install libopenmpi-dev
- - name: Install Tox and any other packages
- run: pip install tox
- - name: Run Coverage Part 1
- run: tox -e cov1
- - name: Run Coverage Part 2
- run: tox -e cov2 || true
+ - name: Install ARMI and MPI
+ run: |
+ sudo apt-get -y install libopenmpi-dev
+ pip install -e .[memprof,mpi,test]
+ - name: Run Coverage
+ run: |
+ coverage run --rcfile=pyproject.toml -m pytest -n 4 --cov=armi --cov-config=pyproject.toml --cov-report=lcov --ignore=venv armi
+ mpiexec -n 2 --use-hwthread-cpus coverage run --rcfile=pyproject.toml -m pytest --cov=armi --cov-config=pyproject.toml --cov-report=lcov --cov-append --ignore=venv armi/tests/test_mpiFeatures.py || true
+ mpiexec -n 2 --use-hwthread-cpus coverage run --rcfile=pyproject.toml -m pytest --cov=armi --cov-config=pyproject.toml --cov-report=lcov --cov-append --ignore=venv armi/tests/test_mpiParameters.py || true
+ mpiexec -n 2 --use-hwthread-cpus coverage run --rcfile=pyproject.toml -m pytest --cov=armi --cov-config=pyproject.toml --cov-report=lcov --cov-append --ignore=venv armi/tests/test_mpiDirectoryChangers.py || true
+ coverage combine --rcfile=pyproject.toml --keep -a
- name: Publish to coveralls.io
- uses: coverallsapp/github-action@v1.1.2
+ uses: coverallsapp/github-action@v2
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
- path-to-lcov: coverage.lcov
+ file: coverage.lcov
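The workflow now drives coverage directly instead of through tox: one serial pytest run plus three MPI runs append to the same data, which `coverage combine` merges before upload. A sketch of that final step using coverage.py's API (assuming coverage.py 6.3+ for LCOV output):

    # a sketch of the combine-and-report step the workflow performs via the CLI
    from coverage import Coverage

    cov = Coverage(config_file="pyproject.toml")
    cov.combine(keep=True)                    # merge the serial and MPI data files
    cov.save()
    cov.lcov_report(outfile="coverage.lcov")  # the file handed to coveralls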
diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
index a73ac49a3..a13b67343 100644
--- a/.github/workflows/docs.yaml
+++ b/.github/workflows/docs.yaml
@@ -5,9 +5,14 @@ on:
branches:
- main
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
jobs:
build:
-
+ # Building and deploying docs is broken on forked repos
+ if: github.repository == 'terrapower/armi'
runs-on: ubuntu-22.04
steps:
@@ -22,14 +27,17 @@ jobs:
run: sudo apt-get -y install libopenmpi-dev
- name: Install Pandoc
run: sudo apt-get -y install pandoc
- - name: Install Tox and any other packages
- run: pip install tox
- name: Setup Graphviz
- uses: ts-graphviz/setup-graphviz@v1
+ uses: ts-graphviz/setup-graphviz@v2.0.2
- name: Make HTML Docs
- run: tox -e doc
+ run: |
+ pip install -e .[memprof,mpi,test,docs]
+ cd doc
+ git submodule init
+ git submodule update
+ make html
- name: deploy
- uses: JamesIves/github-pages-deploy-action@4.1.5
+ uses: JamesIves/github-pages-deploy-action@v4.6.1
with:
token: ${{ secrets.ACCESS_TOKEN }}
repository-name: ${{ github.repository_owner }}/terrapower.github.io
diff --git a/.github/workflows/find_test_crumbs.py b/.github/workflows/find_test_crumbs.py
index a8bc9d296..25ea247e4 100644
--- a/.github/workflows/find_test_crumbs.py
+++ b/.github/workflows/find_test_crumbs.py
@@ -26,6 +26,8 @@
"armi/logs/armiRun.mpi.log",
"armi/tests/tutorials/case-suite/",
"armi/tests/tutorials/logs/",
+ "armiRun.h5",
+ "logs/",
]
diff --git a/.github/workflows/licensechecker.yaml b/.github/workflows/licensechecker.yaml
index 8663b647c..8a3bebe17 100644
--- a/.github/workflows/licensechecker.yaml
+++ b/.github/workflows/licensechecker.yaml
@@ -1,8 +1,13 @@
name: Check License Lines
+
+permissions:
+ contents: read
+
on: [push, pull_request]
+
jobs:
check-license-lines:
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@master
- name: Check License Lines
diff --git a/.github/workflows/linting.yaml b/.github/workflows/linting.yaml
index af340c393..d72e0664a 100644
--- a/.github/workflows/linting.yaml
+++ b/.github/workflows/linting.yaml
@@ -1,11 +1,14 @@
name: Linting
+permissions:
+ contents: read
+
on: [push, pull_request]
jobs:
build:
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@v2
@@ -15,7 +18,7 @@ jobs:
python-version: '3.9'
- name: Update package index
run: sudo apt-get update
- - name: Install Tox and any other packages
- run: pip install tox
- name: Run Linter
- run: tox -e lint
+ run: |
+ pip install -e .[test]
+ ruff check .
diff --git a/.github/workflows/mac_tests.yaml b/.github/workflows/mac_tests.yaml
new file mode 100644
index 000000000..fc2b0f85a
--- /dev/null
+++ b/.github/workflows/mac_tests.yaml
@@ -0,0 +1,37 @@
+name: ARMI macOS Tests
+
+permissions:
+ contents: read
+
+on:
+ push:
+ branches:
+ - main
+ paths-ignore:
+ - 'doc/**'
+ pull_request:
+ paths-ignore:
+ - 'doc/**'
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ build:
+
+ runs-on: macos-14
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Setup Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: '3.11'
+ - name: Upgrade PIP
+ run: python -m pip install --upgrade pip
+      - name: Run Unit Tests on macOS
+ run: |
+ brew install openmpi
+ pip install -e .[memprof,mpi,test]
+ pytest -n 4 armi
diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml
new file mode 100644
index 000000000..cc0167388
--- /dev/null
+++ b/.github/workflows/stale.yaml
@@ -0,0 +1,27 @@
+# This workflow warns and then closes PRs that have had no activity for a specified amount of time.
+#
+# You can adjust the behavior by modifying this file.
+# For more information, see: https://github.com/actions/stale
+name: Mark Stale PRs
+
+on:
+ schedule:
+ # once a day at 3:14 AM
+ - cron: '14 3 * * *'
+
+permissions:
+ pull-requests: write
+
+jobs:
+ stale:
+ runs-on: ubuntu-24.04
+ steps:
+ - uses: actions/stale@v8
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ stale-pr-message: "This pull request has been automatically marked as stale because it has not had any activity in the last 100 days. It will be closed in 7 days if no further activity occurs. Thank you for your contributions."
+ stale-pr-label: "stale"
+ days-before-pr-stale: 100
+ days-before-pr-close: 7
+ days-before-issue-stale: -1
+ operations-per-run: 100
\ No newline at end of file
diff --git a/.github/workflows/unittests.yaml b/.github/workflows/unittests.yaml
index 6e1d950c3..a25a1d68c 100644
--- a/.github/workflows/unittests.yaml
+++ b/.github/workflows/unittests.yaml
@@ -1,5 +1,8 @@
name: ARMI unit tests
+permissions:
+ contents: read
+
on:
push:
paths-ignore:
@@ -8,13 +11,17 @@ on:
paths-ignore:
- 'doc/**'
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
jobs:
build:
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-24.04
strategy:
matrix:
- python: [3.7, 3.8, 3.9, '3.10', '3.11']
+ python: [3.9, '3.10', '3.11', '3.12']
steps:
- uses: actions/checkout@v2
@@ -26,7 +33,10 @@ jobs:
run: sudo apt-get update
- name: Install mpi libs
run: sudo apt-get -y install libopenmpi-dev
- - name: Install Tox and any other packages
- run: pip install tox
- - name: Run Tox
- run: tox -e test,mpitest
+ - name: Run Tests
+ run: |
+ pip install -e .[memprof,mpi,test]
+ pytest -n 4 armi
+ mpiexec -n 2 --use-hwthread-cpus coverage run --rcfile=pyproject.toml -m pytest --cov=armi --cov-config=pyproject.toml --ignore=venv armi/tests/test_mpiFeatures.py || true
+ mpiexec -n 2 --use-hwthread-cpus coverage run --rcfile=pyproject.toml -m pytest --cov=armi --cov-config=pyproject.toml --ignore=venv armi/tests/test_mpiParameters.py || true
+ mpiexec -n 2 --use-hwthread-cpus coverage run --rcfile=pyproject.toml -m pytest --cov=armi --cov-config=pyproject.toml --ignore=venv armi/utils/tests/test_directoryChangersMpi.py || true
diff --git a/.github/workflows/validatemanifest.yaml b/.github/workflows/validatemanifest.yaml
index c9e86c371..d7cf36c49 100644
--- a/.github/workflows/validatemanifest.yaml
+++ b/.github/workflows/validatemanifest.yaml
@@ -1,11 +1,14 @@
name: Validate Manifest
+permissions:
+ contents: read
+
on: [push, pull_request]
jobs:
build:
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@v2
@@ -13,7 +16,7 @@ jobs:
uses: actions/setup-python@v2
with:
python-version: '3.11'
- - name: Install Tox and any other packages
- run: pip install tox
- - name: Run Tox
- run: tox -e manifest
+ - name: Validate Manifest
+ run: |
+ pip install toml
+ python .github/workflows/validatemanifest.py
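Validation now runs the repo's own script against pyproject.toml. Purely as a hypothetical sketch of what such a validator can look like (the real .github/workflows/validatemanifest.py may be organized differently), the idea is to cross-check declared package data against the files on disk:

    # hypothetical sketch of a manifest validator; names and TOML paths are assumptions
    import glob
    import sys

    import toml

    config = toml.load("pyproject.toml")
    patterns = config.get("tool", {}).get("setuptools", {}).get("package-data", {}).get("armi", [])
    missing = [p for p in patterns if not glob.glob(f"armi/{p}", recursive=True)]
    for p in missing:
        print(f"No files match manifest entry: {p}")
    sys.exit(1 if missing else 0)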
diff --git a/.github/workflows/wintests.yaml b/.github/workflows/wintests.yaml
index 9faf86900..e9131aff8 100644
--- a/.github/workflows/wintests.yaml
+++ b/.github/workflows/wintests.yaml
@@ -1,13 +1,22 @@
name: ARMI Windows tests
+permissions:
+ contents: read
+
on:
push:
+ branches:
+ - main
paths-ignore:
- 'doc/**'
pull_request:
paths-ignore:
- 'doc/**'
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
jobs:
build:
@@ -21,9 +30,10 @@ jobs:
python-version: '3.11'
- name: Upgrade PIP
run: python -m pip install --upgrade pip
- - name: Install deps
- run: python -m pip install tox tox-gh-actions
- - name: Run Tox
- run: tox -e test
+ - name: Run Unit Tests on Windows
+ run: |
+ pip install mpi4py==3.1.6
+ pip install -e .[memprof,mpi,test]
+ pytest -n 4 armi
- name: Find Test Crumbs
run: python .github/workflows/find_test_crumbs.py
diff --git a/.gitignore b/.gitignore
index 2d6becc0d..e64adfe9f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,80 +1,80 @@
-# No non-source python resources.
+# No non-source python resources
*.pyc
-*.pyo
*.pyd
+*.pyo
*.pyx
# No build artifacts
-build
-wheelhouse
-*.lib
*.dll
-bin/*
+*.lib
+.apidocs/
/bin
-dist/
+armi/tests/tutorials/case-suite
+bin/*
+build
+coverage.lcov
+coverage.xml
+coverage_results.*
dist-*/
-*.png
+dist/
doc/_build
-doc/user/tutorials
-doc/tutorials/anl-afci-177*
-doc/tutorials/case-suite
-armi/tests/tutorials/case-suite
-.apidocs/
doc/gallery
doc/gallery-src/framework/*.yaml
-.coverage
-coverage.xml
-coverage.lcov
-coverage_results.*
+doc/tutorials/anl-afci-177*
+doc/tutorials/case-suite
+doc/user/tutorials
htmlcov/
monkeytype.*
+wheelhouse
-# No workspace crumbs.
-*~
-.*.swp
-.settings
-.vscode
-.project
-.metadata
-.idea/
-.pydevproject
+# No workspace crumbs
+**/.coverage*
+**/__pycache__
+**/logs/*
+*.ascii
+*.egg-info/
*.sublime-project
*.sublime-workspace
-*.egg-info/
-phabricator-lint.txt
-temp-*
-tags
*.temp
-.externalToolBuilders/
+*~
+.*.swp
.cache/
+.coverage
+.DS_Store
+.externalToolBuilders/
+.hypothesis/
+.idea/
+.ipynb_checkpoints
+.metadata
+.mypy_cache/
+.project
+.pydevproject
.pytest_cache/
-pytestdebug.log
-dump-temp-*
-dump-tests*
+.ruff_cache/
+.settings
+.tox
.vim-bookmarks
+.vscode
armi-venv/*
+dump-temp-*
+dump-tests*
+phabricator-lint.txt
+pytestdebug.log
+reportsOutputFiles/
+tags
+temp-*
venv*/
-.mypy_cache/
-**/__pycache__
-**/.coverage*
-**/logs/*
-*.ascii
-*.png
-# Misc. exclusions
-*.html
-*.diff
+# Ignore common data files
*.avi
-*.ppm
-*.mp4
+*.diff
*.h5
+*.html
+*.mp4
+*.ppm
*.txt
# vis files
-*.vtu
*.vtd
+*.vtu
*.xdmf
-
-# fixtures
-.ipynb_checkpoints
-.tox
diff --git a/README.rst b/README.rst
index b7b03454b..636ba0ae7 100644
--- a/README.rst
+++ b/README.rst
@@ -34,7 +34,7 @@ peak structural temperature in a design-basis transient.
.. note:: ARMI does not come with a full selection of physics kernels. They will need to
be acquired or developed for your specific project in order to make full use of this
- tool. Many of the example use-cases discussed in this manual require functionality
+ tool. Many of the example use-cases discussed in this manual require functionality
that is not included in the open-source ARMI Framework.
In general, ARMI aims to enhance the quality, ease, and rigor of computational nuclear
@@ -60,8 +60,7 @@ found in [#touranarmi]_.
Quick start
-----------
-Before starting, you need to have `Python `_ 3.9+ on
-Windows or Linux.
+Before starting, you need to have `Python `_ 3.9+.
Get the ARMI code, install the prerequisites, and fire up the launcher with the following
commands. You probably want to do this in a virtual environment as described in the `Installation
@@ -70,7 +69,7 @@ dependencies could conflict with your system dependencies.
First, upgrade your version of pip::
- $ pip install pip>=22.1
+ $ pip install -U pip>=22.1
Now clone and install ARMI::
@@ -79,16 +78,8 @@ Now clone and install ARMI::
$ pip install -e .
$ armi --help
-The easiest way to run the tests is to install `tox `_
-and then run::
-
- $ pip install -e ".[test]"
- $ tox -- -n 6
-
-This runs the unit tests in parallel on 6 processes. Omit the ``-n 6`` argument
-to run on a single process.
-
-The tests can also be run directly, using ``pytest``::
+The ARMI tests are meant to be run locally using `pytest `_::
$ pip install -e ".[test]"
$ pytest -n 4 armi
@@ -132,7 +123,7 @@ Nuclear reactor design requires, among other things, answers to the following qu
* How does the building handle earthquakes?
Digital computers have assisted in nuclear technology development since the days of the
-ENIAC in the 1940s. We now understand reactor physics well enough to build detailed
+ENIAC in the 1940s. We now understand reactor physics well enough to build detailed
simulations, which can answer many of these design questions in a cost-effective, and
flexible manner. This allows us to simulate all kinds of different reactors with
different fuels, coolants, moderators, power levels, safety systems, and power cycles.
@@ -144,7 +135,7 @@ economics, and safety.
Perhaps surprisingly, some nuclear software written in the 1960s is still in use today
(mostly ported to Fortran 90 by now). These codes are validated against physical
-experiments that no longer exist. Meanwhile, new cutting-edge nuclear software is being
+experiments that no longer exist. Meanwhile, new cutting-edge nuclear software is being
developed today for powerful computers. Both old and new, these tools are often
challenging to operate and to use in concert with other sub-specialty codes that are
necessary to reach a full system analysis.
@@ -153,7 +144,7 @@ The ARMI approach was born out of this situation: how can we best leverage an ec
mix of legacy and modern tools with a small team to do full-scope analysis? We built an
environment that lets us automate the tedious, uncoupled, and error-prone parts of
reactor engineering/analysis work. We can turn around a very meaningful and detailed
-core analysis given a major change (e.g. change power by 50%) in just a few weeks. We
+core analysis given a major change (e.g. change power by 50%) in just a few weeks. We
can dispatch hundreds of parameter sweeps to multiple machines and then perform
multiobjective optimization on the resulting design space.
@@ -190,7 +181,7 @@ Automation
----------
ARMI can quickly and easily produce complex input files with high levels of detail in
-various approximations. This enables users to perform rapid high-fidelity analyses to
+various approximations. This enables users to perform rapid high-fidelity analyses to
make sure all important physics are captured. It also enables sensitivity studies of
different modeling approximations (e.g. symmetries, transport vs. diffusion vs. Monte
Carlo, subchannel vs. CFD, etc.).
@@ -223,7 +214,7 @@ and finding the peak power density is easy::
Any ARMI state can be written out to whichever format the user desires, meaning that
nominally identical cases can be produced for multiple similar codes in sensitivity
-studies. To read power densities, simply read them off the assembly objects. Instead of
+studies. To read power densities, simply read them off the assembly objects. Instead of
producing spreadsheets and making plots manually, analysts may write scripts to generate
output reports that run automatically.
@@ -242,7 +233,7 @@ Use cases
Given input describing a reactor, a typical ARMI run loops over a set of plugins in a
certain sequence. Some plugins trigger third-party simulation codes, producing input
files for them, executing them, and translating the output back onto the reactor model
-as state information. Other plugins perform physics simulations directly. A variety of
+as state information. Other plugins perform physics simulations directly. A variety of
plugins are available from TerraPower LLC with certain licensing terms, and it is our
hope that a rich ecosystem of useful plugins will be developed and curated by the
community (university research teams, national labs, other companies, etc.).
@@ -266,7 +257,7 @@ For example, one ARMI sequence may involve the calculation of:
transients.
Another sequence may simply compute the cost of feed uranium and enrichment in an
-initial core and quit. The possibilities are limited only by our creativity.
+initial core and quit. The possibilities are limited only by our creativity.
These large runs may also be run through the multiobjective design optimization system,
which runs many cases with input perturbations to help find the best overall system,
@@ -322,7 +313,7 @@ ARMI was originally created by TerraPower, LLC near Seattle WA starting in 2009.
founding mission was to determine the optimal fuel management operations required to
transition a fresh Traveling Wave Reactor core from startup into an equilibrium state.
It started out automating the Argonne National Lab (ANL) fast reactor neutronics codes,
-MC2 and REBUS. The reactor model design was made with the intention of adding other
+MC2 and REBUS. The reactor model design was made with the intention of adding other
physics capabilities later. Soon, simple thermal hydraulics were added and it's grown
ever since. It has continuously evolved towards a general reactor analysis framework.
@@ -331,16 +322,16 @@ architecture for ARMI, allowing some of the intertwined physics capabilities to
separated out as plugins from the standalone framework.
The nuclear industry is small, and it faces many challenges. It also has a tradition of
-secrecy. As a result, there is risk of overlapping work being done by other entities.
+secrecy. As a result, there is risk of overlapping work being done by other entities.
We hypothesize that collaborating on software systems can help align some efforts
worldwide, increasing quality and efficiency. In reactor development, the idea is
-generally cheap. It's the shakedown, technology and supply chain development,
+generally cheap. It's the shakedown, technology and supply chain development,
engineering demo, and commercial demo that are the hard parts.
Thus, ARMI was released under an open-source license in 2019 to facilitate mutually
beneficial collaboration across the nuclear industry, where many teams are independently
-developing similar reactor analysis/automation frameworks. TerraPower will make its
+developing similar reactor analysis/automation frameworks. TerraPower will make its
proprietary analysis routines, physics kernels, and material properties available under
commercial licenses.
@@ -381,16 +372,15 @@ needs of thermal reactors (like a good spatial description of pin maps) exists b
has not been subject to as much use.
ARMI was developed within a rapidly changing R&D environment. It evolved accordingly,
-and naturally carries some legacy. We continuously attempt to identify and update
-problematic parts of the code. Users should understand that ARMI is not a polished
-consumer software product, but rather a powerful and flexible engineering tool. It has
+and naturally carries some legacy. We continuously attempt to identify and update
+problematic parts of the code. Users should understand that ARMI is not a polished
+consumer software product, but rather a powerful and flexible engineering tool. It has
the potential to accelerate work on many kinds of reactors. But in many cases, it will
require serious and targeted investment.
ARMI was largely written by nuclear and mechanical engineers. We (as a whole) only
really, truly, recognized the value of things like static typing in a complex system
-like ARMI somewhat recently. Contributions from software engineers are *more than*
-welcome!
+like ARMI somewhat recently.
ARMI has been written to support specific engineering/design tasks. As such, polish in
the GUIs and output is somewhat lacking.
@@ -427,7 +417,7 @@ Be careful when including any dependency in ARMI (say in the ``pyproject.toml``
to include anything with a license that superceeds our Apache license. For instance,
any third-party Python library included in ARMI with a GPL license will make the whole
project fall under the GPL license. But a lot of potential users of ARMI will want to
-keep some of their work private, so we can't allow any GPL tools.
+keep some of their work private, so we can't allow any GPL dependencies.
For that reason, it is generally considered best-practice in the ARMI ecosystem to
only use third-party Python libraries that have MIT or BSD licenses.
diff --git a/armi/_bootstrap.py b/armi/_bootstrap.py
index 7d522cd3b..9a568fd0c 100644
--- a/armi/_bootstrap.py
+++ b/armi/_bootstrap.py
@@ -15,7 +15,6 @@
"""Code that needs to be executed before most ARMI components are safe to import."""
import sys
-import tabulate
# This is a courtesy, to help people who accidently run ARMI with an old version of Python.
if (
@@ -29,27 +28,7 @@
)
-def _addCustomTabulateTables():
- """Create a custom ARMI tables within tabulate."""
- tabulate._table_formats["armi"] = tabulate.TableFormat(
- lineabove=tabulate.Line("", "-", " ", ""),
- linebelowheader=tabulate.Line("", "-", " ", ""),
- linebetweenrows=None,
- linebelow=tabulate.Line("", "-", " ", ""),
- headerrow=tabulate.DataRow("", " ", ""),
- datarow=tabulate.DataRow("", " ", ""),
- padding=0,
- with_header_hide=None,
- )
- tabulate.tabulate_formats = list(sorted(tabulate._table_formats.keys()))
- tabulate.multiline_formats["armi"] = "armi"
-
-
-# runLog makes tables, so make sure this is setup before we initialize the runLog
-_addCustomTabulateTables()
-
-
-from armi.nucDirectory import nuclideBases # noqa: module-import-not-at-top-of-file
+from armi.nucDirectory import nuclideBases # noqa: E402
# Nuclide bases get built explicitly here to have better determinism
# about when they get instantiated. The burn chain is not applied
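The monkey-patching removed above is obsolete because ARMI now vendors its own tabulate module (see the compareDB3 import change below), which can carry the custom "armi" style internally. A sketch, assuming the vendored module mirrors the upstream tabulate() call signature:

    # a sketch, assuming armi.utils.tabulate mirrors upstream tabulate's signature
    from armi.utils.tabulate import tabulate

    rows = [["fuel", 600.0], ["coolant", 450.0]]
    print(tabulate(rows, headers=["component", "temperature (C)"]))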
diff --git a/armi/bookkeeping/db/__init__.py b/armi/bookkeeping/db/__init__.py
index ca0f5b4ed..a4d7078e2 100644
--- a/armi/bookkeeping/db/__init__.py
+++ b/armi/bookkeeping/db/__init__.py
@@ -78,7 +78,13 @@
]
-def loadOperator(pathToDb, loadCycle, loadNode, allowMissing=False):
+def loadOperator(
+ pathToDb,
+ loadCycle,
+ loadNode,
+ statePointName=None,
+ allowMissing=False,
+):
"""
Return an operator given the path to a database.
@@ -90,6 +96,9 @@ def loadOperator(pathToDb, loadCycle, loadNode, allowMissing=False):
The cycle to load the reactor state from.
loadNode : int
The time node to load the reactor from.
+    statePointName : str, optional
+        State point name suffix, e.g. `EOC` or `EOL`; the full group name would
+        then be `c00n02EOC` (see database3.getH5GroupName).
allowMissing : bool
Whether to emit a warning, rather than crash if reading a database
with undefined parameters. Default False.
@@ -137,7 +146,12 @@ def loadOperator(pathToDb, loadCycle, loadNode, allowMissing=False):
cs = db.loadCS()
thisCase = cases.Case(cs)
- r = db.load(loadCycle, loadNode, allowMissing=allowMissing)
+ r = db.load(
+ loadCycle,
+ loadNode,
+ statePointName=statePointName,
+ allowMissing=allowMissing,
+ )
o = thisCase.initializeOperator(r=r)
runLog.important(
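With the new statePointName argument, callers can target suffixed statepoints such as the EOL write added in databaseInterface below. A sketch (the database file name is hypothetical):

    from armi.bookkeeping.db import loadOperator

    o = loadOperator(
        "myRun.h5",
        loadCycle=0,
        loadNode=2,
        statePointName="EOC",  # loads group c00n02EOC instead of plain c00n02
        allowMissing=True,
    )
    r = o.r  # the reactor state reconstituted from that statepoint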
diff --git a/armi/bookkeeping/db/compareDB3.py b/armi/bookkeeping/db/compareDB3.py
index f46cb23c0..173f6b5c0 100644
--- a/armi/bookkeeping/db/compareDB3.py
+++ b/armi/bookkeeping/db/compareDB3.py
@@ -49,9 +49,8 @@
import re
import traceback
-from tabulate import tabulate
import h5py
-import numpy
+import numpy as np
from armi import runLog
from armi.bookkeeping.db import database3
@@ -59,6 +58,7 @@
from armi.bookkeeping.db.factory import databaseFactory
from armi.bookkeeping.db.permissions import Permissions
from armi.reactor.composites import ArmiObject
+from armi.utils.tabulate import tabulate
class OutputWriter:
@@ -329,7 +329,7 @@ def _diffSpecialData(
diffResults.addStructureDiffs(nDiffs)
if not keysMatch:
- diffResults.addDiff(name, name, numpy.inf, numpy.inf, numpy.inf)
+ diffResults.addDiff(name, name, np.inf, np.inf, np.inf)
return
if srcData.attrs.get("dict", False):
@@ -341,7 +341,7 @@ def _diffSpecialData(
for k, srcAttr in srcData.attrs.items():
refAttr = refData.attrs[k]
- if isinstance(srcAttr, numpy.ndarray) and isinstance(refAttr, numpy.ndarray):
+ if isinstance(srcAttr, np.ndarray) and isinstance(refAttr, np.ndarray):
srcFlat = srcAttr.flatten()
refFlat = refAttr.flatten()
if len(srcFlat) != len(refFlat):
@@ -374,12 +374,10 @@ def _diffSpecialData(
diff = []
for dSrc, dRef in zip(src.tolist(), ref.tolist()):
- if isinstance(dSrc, numpy.ndarray) and isinstance(dRef, numpy.ndarray):
+ if isinstance(dSrc, np.ndarray) and isinstance(dRef, np.ndarray):
if dSrc.shape != dRef.shape:
out.writeln("Shapes did not match for {}".format(refData))
- diffResults.addDiff(
- compName, paramName, numpy.inf, numpy.inf, numpy.inf
- )
+ diffResults.addDiff(compName, paramName, np.inf, np.inf, np.inf)
return
# make sure not to try to compare empty arrays. Numpy is mediocre at
@@ -395,7 +393,7 @@ def _diffSpecialData(
if (dSrc is None) ^ (dRef is None):
out.writeln("Mismatched Nones for {} in {}".format(paramName, compName))
- diff.append([numpy.inf])
+ diff.append([np.inf])
continue
if dSrc is None:
@@ -410,12 +408,12 @@ def _diffSpecialData(
if dSrc == dRef:
diff.append([0.0])
else:
- diff.append([numpy.inf])
+ diff.append([np.inf])
if diff:
try:
- diff = [numpy.array(d).flatten() for d in diff]
- diff = numpy.concatenate(diff)
+ diff = [np.array(d).flatten() for d in diff]
+ diff = np.concatenate(diff)
except ValueError as e:
out.writeln(
"Failed to concatenate diff data for {} in {}: {}".format(
@@ -424,10 +422,10 @@ def _diffSpecialData(
)
out.writeln("Because: {}".format(e))
return
- absDiff = numpy.abs(diff)
- mean = numpy.nanmean(diff)
- absMax = numpy.nanmax(absDiff)
- absMean = numpy.nanmean(absDiff)
+ absDiff = np.abs(diff)
+ mean = np.nanmean(diff)
+ absMax = np.nanmax(absDiff)
+ absMean = np.nanmean(absDiff)
diffResults.addDiff(compName, paramName, absMean, mean, absMax)
@@ -448,21 +446,21 @@ def _diffSimpleData(ref: h5py.Dataset, src: h5py.Dataset, diffResults: DiffResul
runLog.error("Failed to compare {} in {}".format(paramName, compName))
runLog.error("source: {}".format(src))
runLog.error("reference: {}".format(ref))
- diff = numpy.array([numpy.inf])
+ diff = np.array([np.inf])
except ValueError:
runLog.error("Failed to compare {} in {}".format(paramName, compName))
runLog.error("source: {}".format(src))
runLog.error("reference: {}".format(ref))
- diff = numpy.array([numpy.inf])
+ diff = np.array([np.inf])
if 0 in diff.shape:
# Empty list, no diff
return
- absDiff = numpy.abs(diff)
- mean = numpy.nanmean(diff)
- absMax = numpy.nanmax(absDiff)
- absMean = numpy.nanmean(absDiff)
+ absDiff = np.abs(diff)
+ mean = np.nanmean(diff)
+ absMax = np.nanmax(absDiff)
+ absMean = np.nanmean(absDiff)
diffResults.addDiff(compName, paramName, absMean, mean, absMax)
@@ -501,9 +499,7 @@ def _compareComponentData(
paramName, refSpecial, srcSpecial
)
)
- diffResults.addDiff(
- refGroup.name, paramName, numpy.inf, numpy.inf, numpy.inf
- )
+ diffResults.addDiff(refGroup.name, paramName, np.inf, np.inf, np.inf)
continue
if srcSpecial or refSpecial:
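Aside from the numpy-to-np aliasing, the summary statistics recorded for each parameter diff are unchanged. For reference, the three numbers handed to addDiff:

    # the three summary statistics recorded per parameter diff (illustrative values)
    import numpy as np

    diff = np.array([0.0, -2.0, np.nan, 4.0])
    absDiff = np.abs(diff)
    print(np.nanmean(diff))     # 0.666... -- signed mean, ignoring NaN
    print(np.nanmax(absDiff))   # 4.0      -- largest absolute difference
    print(np.nanmean(absDiff))  # 2.0      -- mean absolute difference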
diff --git a/armi/bookkeeping/db/database3.py b/armi/bookkeeping/db/database3.py
index ee67f70dd..4f85a2b9b 100644
--- a/armi/bookkeeping/db/database3.py
+++ b/armi/bookkeeping/db/database3.py
@@ -33,6 +33,7 @@
"""
import collections
import copy
+import gc
import io
import itertools
import os
@@ -43,43 +44,37 @@
import sys
from platform import uname
from typing import (
- Optional,
- Tuple,
- Type,
- Dict,
Any,
+ Dict,
+ Generator,
List,
+ Optional,
Sequence,
- Generator,
+ Tuple,
+    Type,
+    Union,
)
import h5py
-import numpy
+import numpy as np
-from armi import context
-from armi import getApp
-from armi import meta
-from armi import runLog
-from armi import settings
+from armi import context, getApp, meta, runLog, settings
+from armi.bookkeeping.db.jaggedArray import JaggedArray
from armi.bookkeeping.db.layout import (
- Layout,
DB_VERSION,
+ Layout,
replaceNonesWithNonsense,
replaceNonsenseWithNones,
)
-from armi.bookkeeping.db.typedefs import History, Histories
+from armi.bookkeeping.db.typedefs import Histories, History
from armi.nucDirectory import nuclideBases
from armi.physics.neutronics.settings import CONF_LOADING_FILE
-from armi.reactor import grids
-from armi.reactor import parameters
-from armi.reactor import systemLayoutInput
+from armi.reactor import grids, parameters, systemLayoutInput
from armi.reactor.assemblies import Assembly
from armi.reactor.blocks import Block
from armi.reactor.components import Component
from armi.reactor.composites import ArmiObject
-from armi.reactor.flags import Flags
from armi.reactor.parameters import parameterCollections
-from armi.reactor.reactors import Core
+from armi.reactor.reactors import Core, Reactor
from armi.settings.fwSettings.globalSettings import CONF_SORT_REACTOR
from armi.utils import getNodesPerCycle
from armi.utils.textProcessors import resolveMarkupInclusions
@@ -124,7 +119,8 @@ class Database3:
`doc/user/outputs/database` for more details.
"""
- timeNodeGroupPattern = re.compile(r"^c(\d\d)n(\d\d)$")
+ # Allows matching for, e.g., c01n02EOL
+ timeNodeGroupPattern = re.compile(r"^c(\d\d)n(\d\d).*$")
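+    # For example (illustrative):
+    #   timeNodeGroupPattern.match("c01n02")    -> matches: cycle "01", node "02"
+    #   timeNodeGroupPattern.match("c01n02EOL") -> also matches, via the trailing .*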
def __init__(self, fileName: os.PathLike, permission: str):
"""
@@ -223,7 +219,7 @@ def open(self):
(os.path.abspath(sys.modules[p[1].__module__].__file__), p[1].__name__)
for p in plugins
]
- ps = numpy.array([str(p[0]) + ":" + str(p[1]) for p in ps]).astype("S")
+ ps = np.array([str(p[0]) + ":" + str(p[1]) for p in ps]).astype("S")
self.h5db.attrs["pluginPaths"] = ps
self.h5db.attrs["localCommitHash"] = Database3.grabLocalCommitHash()
@@ -246,7 +242,7 @@ def writeSystemAttributes(h5db):
h5db.attrs["python"] = sys.version
h5db.attrs["armiLocation"] = os.path.dirname(context.ROOT)
h5db.attrs["startTime"] = context.START_TIME
- h5db.attrs["machines"] = numpy.array(context.MPI_NODENAMES).astype("S")
+ h5db.attrs["machines"] = np.array(context.MPI_NODENAMES).astype("S")
# store platform data
platform_data = uname()
@@ -291,7 +287,7 @@ def grabLocalCommitHash():
try:
commit_hash = subprocess.check_output(["git", "describe"])
return commit_hash.decode("utf-8").strip()
- except: # noqa: bare-except
+ except Exception:
return unknown
else:
return unknown
@@ -652,7 +648,7 @@ def getH5Group(self, r, statePointName=None):
if groupName in self.h5db:
return self.h5db[groupName]
else:
- group = self.h5db.create_group(groupName)
+ group = self.h5db.create_group(groupName, track_order=True)
group.attrs["cycle"] = r.p.cycle
group.attrs["timeNode"] = r.p.timeNode
return group
@@ -688,8 +684,18 @@ def syncToSharedFolder(self):
"""
runLog.extra("Copying DB to shared working directory.")
self.h5db.flush()
+
+ # Close the h5 file so it can be copied
+ self.h5db.close()
+ self.h5db = None
shutil.copy(self._fullPath, self._fileName)
+ # Garbage collect so we don't have multiple databases hanging around in memory
+ gc.collect()
+
+ # Reload the file in append mode and continue on our merry way
+ self.h5db = h5py.File(self._fullPath, "r+")
+
def load(
self,
cycle,
@@ -858,24 +864,31 @@ def _compose(self, comps, cs, parent=None):
comp.add(child)
if isinstance(comp, Core):
- # TODO: This is also an issue related to geoms and which core is "The Core".
- # We only have a good geom for the main core, so can't do process loading on
- # the SFP, etc.
- if comp.hasFlags(Flags.CORE):
- comp.processLoading(cs, dbLoad=True)
+ comp.processLoading(cs, dbLoad=True)
elif isinstance(comp, Assembly):
comp.calculateZCoords()
+ elif isinstance(comp, Component):
+ comp.finalizeLoadingFromDB()
return comp
- def _writeParams(self, h5group, comps):
+ def _writeParams(self, h5group, comps) -> tuple:
+        def _getShape(arr: Union[np.ndarray, List, Tuple]):
+ """Get the shape of a np.ndarray, list, or tuple."""
+ if isinstance(arr, np.ndarray):
+ return arr.shape
+ elif isinstance(arr, (list, tuple)):
+ return (len(arr),)
+ else:
+ return (1,)
+
c = comps[0]
groupName = c.__class__.__name__
if groupName not in h5group:
# Only create the group if it doesnt already exist. This happens when
# re-writing params in the same time node (e.g. something changed between
# EveryNode and EOC)
- g = h5group.create_group(groupName)
+ g = h5group.create_group(groupName, track_order=True)
else:
g = h5group[groupName]
@@ -895,9 +908,9 @@ def _writeParams(self, h5group, comps):
linkedDims.append("")
data.append(val)
- data = numpy.array(data)
+ data = np.array(data)
if any(linkedDims):
- attrs["linkedDims"] = numpy.array(linkedDims).astype("S")
+ attrs["linkedDims"] = np.array(linkedDims).astype("S")
else:
# NOTE: after loading, the previously unset values will be defaulted
temp = [c.p.get(paramDef.name, paramDef.default) for c in comps]
@@ -912,41 +925,46 @@ def _writeParams(self, h5group, comps):
attrs[_SERIALIZER_NAME] = paramDef.serializer.__name__
attrs[_SERIALIZER_VERSION] = paramDef.serializer.version
else:
- data = numpy.array(temp)
+ # check if temp is a jagged array
+ if any(isinstance(x, (np.ndarray, list)) for x in temp):
+ jagged = len(set([_getShape(x) for x in temp])) != 1
+ else:
+ jagged = False
+ data = (
+ JaggedArray(temp, paramDef.name) if jagged else np.array(temp)
+ )
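+                # e.g., temp = [np.zeros(3), np.zeros(4)] gives shapes {(3,), (4,)},
+                # so it is wrapped in a JaggedArray and flattened for HDF5 storage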
del temp
- # Convert Unicode to byte-string
- if data.dtype.kind == "U":
- data = data.astype("S")
-
- if data.dtype.kind == "O":
- # Something was added to the data array that caused numpy to want to
- # treat it as a general-purpose Object array. This usually happens
- # because:
- # - the data contain NoDefaults
- # - the data contain one or more Nones,
- # - the data contain special types like tuples, dicts, etc
- # - the data are composed of arrays that numpy would otherwise happily
- # convert to a higher-order array, but the dimensions of the sub-arrays
- # are inconsistent ("jagged")
- # - there is some sort of honest-to-goodness weird object
- # We want to support the first two cases with minimal intrusion, since
- # these should be pretty easy to faithfully represent in the db. The
- # jagged case should be supported as well, but may require a less
- # faithful representation (e.g. flattened), but the last case isn't
- # really worth supporting.
-
- # Here is one proposal:
- # - Check to see if the array is jagged. all(shape == shape[0]). If not,
- # flatten, store the data offsets and array shapes, and None locations
- # as attrs
- # - If not jagged, all top-level ndarrays are the same shape, so it is
- # easier to replace Nones with ndarrays filled with special values.
- if parameters.NoDefault in data:
- data = None
- else:
- data, specialAttrs = packSpecialData(data, paramDef.name)
- attrs.update(specialAttrs)
+ # - Check to see if the array is jagged. If so, flatten, store the
+ # data offsets and array shapes, and None locations as attrs.
+ # - If not jagged, all top-level ndarrays are the same shape, so it is
+ # easier to replace Nones with ndarrays filled with special values.
+ if isinstance(data, JaggedArray):
+ data, specialAttrs = packSpecialData(data, paramDef.name)
+ attrs.update(specialAttrs)
+
+ else: # np.ndarray
+ # Convert Unicode to byte-string
+ if data.dtype.kind == "U":
+ data = data.astype("S")
+
+ if data.dtype.kind == "O":
+ # Something was added to the data array that caused np to want to
+                    # Something was added to the data array that caused numpy to want to
+ # because:
+ # - the data contain NoDefaults
+ # - the data contain one or more Nones,
+ # - the data contain special types like tuples, dicts, etc
+ # - there is some sort of honest-to-goodness weird object
+ # We want to support the first two cases with minimal intrusion, since
+ # these should be pretty easy to faithfully represent in the db.
+ # The last case isn't really worth supporting.
+
+ if parameters.NoDefault in data:
+ data = None
+ else:
+ data, specialAttrs = packSpecialData(data, paramDef.name)
+ attrs.update(specialAttrs)
if data is None:
continue
@@ -958,7 +976,9 @@ def _writeParams(self, h5group, comps):
"should have been empty".format(paramDef.name, g)
)
- dataset = g.create_dataset(paramDef.name, data=data, compression="gzip")
+ dataset = g.create_dataset(
+ paramDef.name, data=data, compression="gzip", track_order=True
+ )
if any(attrs):
Database3._writeAttrs(dataset, h5group, attrs)
except Exception:
@@ -982,7 +1002,9 @@ def _addHomogenizedNumberDensityParams(blocks, h5group):
nDens = collectBlockNumberDensities(blocks)
for nucName, numDens in nDens.items():
- h5group.create_dataset(nucName, data=numDens, compression="gzip")
+ h5group.create_dataset(
+ nucName, data=numDens, compression="gzip", track_order=True
+ )
@staticmethod
def _readParams(h5group, compTypeName, comps, allowMissing=False):
@@ -1027,23 +1049,23 @@ def _readParams(h5group, compTypeName, comps, allowMissing=False):
assert dataSet.attrs[_SERIALIZER_NAME] == pDef.serializer.__name__
assert _SERIALIZER_VERSION in dataSet.attrs
- data = numpy.array(
+ data = np.array(
pDef.serializer.unpack(
data, dataSet.attrs[_SERIALIZER_VERSION], attrs
)
)
- if data.dtype.type is numpy.string_:
- data = numpy.char.decode(data)
+ if data.dtype.type is np.string_:
+ data = np.char.decode(data)
if attrs.get("specialFormatting", False):
data = unpackSpecialData(data, attrs, paramName)
linkedDims = []
if "linkedDims" in attrs:
- linkedDims = numpy.char.decode(attrs["linkedDims"])
+ linkedDims = np.char.decode(attrs["linkedDims"])
- # iterating of numpy is not fast...
+            # iterating over numpy arrays is not fast...
for c, val, linkedDim in itertools.zip_longest(
comps, data.tolist(), linkedDims, fillvalue=""
):
@@ -1171,7 +1193,7 @@ def getHistoriesByLocation(
lLocation = layout.location
# filter for objects that live under the desired ancestor and at a desired location
- objectIndicesInLayout = numpy.array(
+ objectIndicesInLayout = np.array(
[
i
for i, (ancestor, loc) in enumerate(zip(ancestors, lLocation))
@@ -1182,7 +1204,7 @@ def getHistoriesByLocation(
# This could also be way more efficient if lLocation were a numpy array
objectLocationsInLayout = [lLocation[i] for i in objectIndicesInLayout]
- objectIndicesInData = numpy.array(layout.indexInData)[
+ objectIndicesInData = np.array(layout.indexInData)[
objectIndicesInLayout
].tolist()
@@ -1199,7 +1221,7 @@ def getHistoriesByLocation(
for paramName in params or h5GroupForType.keys():
if paramName == "location":
# location is special, since it is stored in layout/
- data = numpy.array(layout.location)[objectIndicesInLayout]
+ data = np.array(layout.location)[objectIndicesInLayout]
elif paramName in h5GroupForType:
dataSet = h5GroupForType[paramName]
try:
@@ -1212,8 +1234,8 @@ def getHistoriesByLocation(
)
raise
- if data.dtype.type is numpy.string_:
- data = numpy.char.decode(data)
+ if data.dtype.type is np.string_:
+ data = np.char.decode(data)
if dataSet.attrs.get("specialFormatting", False):
if dataSet.attrs.get("nones", False):
@@ -1228,7 +1250,7 @@ def getHistoriesByLocation(
)
else:
# Nothing in the database for this param, so use the default value
- data = numpy.repeat(
+ data = np.repeat(
parameters.byNameAndType(paramName, compType).default,
len(comps),
)
@@ -1295,8 +1317,7 @@ def getHistories(
Returns
-------
dict
- Dictionary ArmiObject (input): dict of str/list pairs containing ((cycle,
- node), value).
+ Dictionary ArmiObject (input): dict of str/list pairs containing ((cycle, node), value).
"""
histData: Histories = {
c: collections.defaultdict(collections.OrderedDict) for c in comps
@@ -1313,8 +1334,7 @@ def getHistories(
if "layout" not in h5TimeNodeGroup:
# Layout hasn't been written for this time step, so whatever is in there
# didn't come from the DatabaseInterface. Probably because it's the
- # current time step and something has created the group to store aux
- # data
+ # current time step and something has created the group to store aux data
continue
cycle = h5TimeNodeGroup.attrs["cycle"]
@@ -1334,7 +1354,7 @@ def getHistories(
)
)
raise ee
- layoutIndicesForType = numpy.where(layout.type == compTypeName)[0]
+ layoutIndicesForType = np.where(layout.type == compTypeName)[0]
serialNumsForType = layout.serialNum[layoutIndicesForType].tolist()
layoutIndexInData = layout.indexInData[layoutIndicesForType].tolist()
@@ -1357,10 +1377,10 @@ def getHistories(
# 3) not performing parameter renaming. This may become necessary
for paramName in params or h5GroupForType.keys():
if paramName == "location":
- # cast to a numpy array so that we can use list indices
- data = numpy.array(layout.location)[layoutIndicesForType][
- indexInData
- ]
+                    locs = []
+                    for idx in indexInData:
+                        locs.append(layout.location[layoutIndicesForType[idx]])
+                    data = np.array(locs)
elif paramName in h5GroupForType:
dataSet = h5GroupForType[paramName]
try:
@@ -1373,8 +1393,8 @@ def getHistories(
)
raise
- if data.dtype.type is numpy.string_:
- data = numpy.char.decode(data)
+ if data.dtype.type is np.string_:
+ data = np.char.decode(data)
if dataSet.attrs.get("specialFormatting", False):
if dataSet.attrs.get("nones", False):
@@ -1389,26 +1409,28 @@ def getHistories(
)
else:
# Nothing in the database, so use the default value
- data = numpy.repeat(
+ data = np.repeat(
parameters.byNameAndType(paramName, compType).default,
len(reorderedComps),
)
- # iterating of numpy is not fast..
+                # iterating over numpy arrays is not fast...
for c, val in zip(reorderedComps, data.tolist()):
- if isinstance(val, list):
- val = numpy.array(val)
+ if paramName == "location":
+ val = tuple(val)
+ elif isinstance(val, list):
+ val = np.array(val)
histData[c][paramName][cycle, timeNode] = val
- r = comps[0].getAncestorWithFlags(Flags.REACTOR)
+ r = comps[0].getAncestor(lambda c: isinstance(c, Reactor))
cycleNode = r.p.cycle, r.p.timeNode
for c, paramHistories in histData.items():
for paramName, hist in paramHistories.items():
if cycleNode not in hist:
try:
hist[cycleNode] = c.p[paramName]
- except: # noqa: bare-except
+ except Exception:
if paramName == "location":
hist[cycleNode] = c.spatialLocator.indices
@@ -1490,8 +1512,8 @@ def _resolveAttrs(attrs, group):
def packSpecialData(
- data: numpy.ndarray, paramName: str
-) -> Tuple[Optional[numpy.ndarray], Dict[str, Any]]:
+    arrayData: Union[np.ndarray, JaggedArray], paramName: str
+) -> Tuple[Optional[np.ndarray], Dict[str, Any]]:
"""
Reduce data that wouldn't otherwise play nicely with HDF5/numpy arrays to a format
that will.
@@ -1522,9 +1544,10 @@ def packSpecialData(
Parameters
----------
- data
- An ndarray storing the data that we want to stuff into the database. These are
- usually dtype=Object, which is how we usually end up here in the first place.
+ arrayData
+ An ndarray or JaggedArray object storing the data that we want to stuff into
+ the database. If the data is jagged, a special JaggedArray instance is passed
+ in, which contains a 1D array with offsets and shapes.
paramName
The parameter name that we are trying to store data for. This is mostly used for
@@ -1534,10 +1557,15 @@ def packSpecialData(
--------
unpackSpecialData
"""
- # Check to make sure that we even need to do this. If the numpy data type is
- # not "O", chances are we have nice, clean data.
- if data.dtype != "O":
- return data, {}
+ if isinstance(arrayData, JaggedArray):
+ data = arrayData.flattenedArray
+ else:
+ # Check to make sure that we even need to do this. If the numpy data type is
+ # not "O", chances are we have nice, clean data.
+ if arrayData.dtype != "O":
+ return arrayData, {}
+ else:
+ data = arrayData
attrs: Dict[str, Any] = {"specialFormatting": True}
@@ -1546,7 +1574,7 @@ def packSpecialData(
# find locations of Nones. The below works for ndarrays, whereas `data == None`
# gives a single True/False value
- nones = numpy.where([d is None for d in data])[0]
+ nones = np.where([d is None for d in data])[0]
if len(nones) == data.shape[0]:
# Everything is None, so why bother?
@@ -1560,7 +1588,7 @@ def packSpecialData(
# A robust solution would need
# to do this on a case-by-case basis, and re-do it any time we want to
# write, since circumstances may change. Not only that, but we may need
- # to do perform more that one of these operations to get to an array
+ # to perform more than one of these operations to get to an array
# that we want to put in the database.
if any(isinstance(d, dict) for d in data):
# we're assuming that a dict is {str: float}. We store the union of
@@ -1573,7 +1601,7 @@ def packSpecialData(
# for most keys.
attrs["dict"] = True
keys = sorted({k for d in data for k in d})
- data = numpy.array([[d.get(k, numpy.nan) for k in keys] for d in data])
+ data = np.array([[d.get(k, np.nan) for k in keys] for d in data])
if data.dtype == "O":
# The data themselves are nasty. We could support this, but best to wait for
# a credible use case.
@@ -1581,60 +1609,28 @@ def packSpecialData(
"Unable to coerce dictionary data into usable numpy array for "
"{}".format(paramName)
)
- attrs["keys"] = numpy.array(keys).astype("S")
+ attrs["keys"] = np.array(keys).astype("S")
+
+ return data, attrs
+ if isinstance(arrayData, JaggedArray):
+ attrs["jagged"] = True
+ attrs["offsets"] = arrayData.offsets
+ attrs["shapes"] = arrayData.shapes
+ attrs["noneLocations"] = arrayData.nones
return data, attrs
# conform non-numpy arrays to numpy
for i, val in enumerate(data):
if isinstance(val, (list, tuple)):
- data[i] = numpy.array(val)
+ data[i] = np.array(val)
- if not any(isinstance(d, numpy.ndarray) for d in data):
+ if not any(isinstance(d, np.ndarray) for d in data):
# looks like 1-D plain-old-data
data = replaceNonesWithNonsense(data, paramName, nones)
return data, attrs
- # check if data is jagged
- candidate = next((d for d in data if d is not None))
- shape = candidate.shape
- ndim = candidate.ndim
- isJagged = (
- not all(d.shape == shape for d in data if d is not None) or candidate.size == 0
- )
-
- if isJagged:
- assert all(
- val.ndim == ndim for val in data if val is not None
- ), "Inconsistent dimensions in jagged array for: {}\nDimensions: {}".format(
- paramName, [val.ndim for val in data if val is not None]
- )
- attrs["jagged"] = True
-
- # offsets[i] is the index of the zero-th element of sub-array i
- offsets = numpy.array(
- [0]
- + list(
- itertools.accumulate(val.size if val is not None else 0 for val in data)
- )[:-1]
- )
-
- # shapes[i] is the shape of the i-th sub-array. Nones are represented by all
- # zeros
- shapes = numpy.array(
- list(val.shape if val is not None else ndim * (0,) for val in data)
- )
-
- data = numpy.delete(data, nones)
-
- data = numpy.concatenate(data, axis=None)
-
- attrs["offsets"] = offsets
- attrs["shapes"] = shapes
- attrs["noneLocations"] = nones
- return data, attrs
-
- if any(isinstance(d, (tuple, list, numpy.ndarray)) for d in data):
+ if any(isinstance(d, (tuple, list, np.ndarray)) for d in data):
data = replaceNonesWithNonsense(data, paramName, nones)
return data, attrs
@@ -1648,7 +1644,7 @@ def packSpecialData(
raise TypeError("Failed to process special data for {}".format(paramName))
-def unpackSpecialData(data: numpy.ndarray, attrs, paramName: str) -> numpy.ndarray:
+def unpackSpecialData(data: np.ndarray, attrs, paramName: str) -> np.ndarray:
"""
Extract data from a specially-formatted HDF5 dataset into a numpy array.
@@ -1667,7 +1663,7 @@ def unpackSpecialData(data: numpy.ndarray, attrs, paramName: str) -> numpy.ndarr
Returns
-------
- numpy.ndarray
+ np.ndarray
An ndarray containing the closest possible representation of the data that was
originally written to the database.
@@ -1687,30 +1683,18 @@ def unpackSpecialData(data: numpy.ndarray, attrs, paramName: str) -> numpy.ndarr
if attrs.get("jagged", False):
offsets = attrs["offsets"]
shapes = attrs["shapes"]
- ndim = len(shapes[0])
- emptyArray = numpy.ndarray(ndim * (0,), dtype=data.dtype)
- unpackedJaggedData: List[Optional[numpy.ndarray]] = []
- for offset, shape in zip(offsets, shapes):
- if tuple(shape) == ndim * (0,):
- # Start with an empty array. This may be replaced with a None later
- unpackedJaggedData.append(emptyArray)
- else:
- unpackedJaggedData.append(
- numpy.ndarray(shape, dtype=data.dtype, buffer=data[offset:])
- )
- for i in attrs["noneLocations"]:
- unpackedJaggedData[i] = None
-
- return numpy.array(unpackedJaggedData, dtype=object)
+ nones = attrs["noneLocations"]
+ data = JaggedArray.fromH5(data, offsets, shapes, nones, data.dtype, paramName)
+ return data
if attrs.get("dict", False):
- keys = numpy.char.decode(attrs["keys"])
+ keys = np.char.decode(attrs["keys"])
unpackedData = []
assert data.ndim == 2
for d in data:
unpackedData.append(
- {key: value for key, value in zip(keys, d) if not numpy.isnan(value)}
+ {key: value for key, value in zip(keys, d) if not np.isnan(value)}
)
- return numpy.array(unpackedData)
+ return np.array(unpackedData)
raise ValueError(
"Do not recognize the type of special formatting that was applied "
@@ -1718,7 +1702,7 @@ def unpackSpecialData(data: numpy.ndarray, attrs, paramName: str) -> numpy.ndarr
)
-def collectBlockNumberDensities(blocks) -> Dict[str, numpy.ndarray]:
+def collectBlockNumberDensities(blocks) -> Dict[str, np.ndarray]:
"""
Collect block-by-block homogenized number densities for each nuclide.
@@ -1739,7 +1723,7 @@ def collectBlockNumberDensities(blocks) -> Dict[str, numpy.ndarray]:
nucDensityMatrix = []
for block in blocks:
nucDensityMatrix.append(block.getNuclideNumberDensities(nucNames))
- nucDensityMatrix = numpy.array(nucDensityMatrix)
+ nucDensityMatrix = np.array(nucDensityMatrix)
dataDict = dict()
for ni, nb in enumerate(nucBases):
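A sketch of the dict-handling path through packSpecialData/unpackSpecialData, which this change leaves intact while rerouting jagged data through JaggedArray:

    # round-tripping a dict-valued parameter (a sketch)
    import numpy as np

    from armi.bookkeeping.db.database3 import packSpecialData, unpackSpecialData

    data = np.array([{"U235": 1.0}, {"U238": 2.0}], dtype=object)
    packed, attrs = packSpecialData(data, "nDens")  # dense 2D float array; attrs carry "dict" and "keys"
    restored = unpackSpecialData(packed, attrs, "nDens")
    # restored[0] == {"U235": 1.0}; NaN fill values for missing keys are dropped on unpack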
diff --git a/armi/bookkeeping/db/databaseInterface.py b/armi/bookkeeping/db/databaseInterface.py
index 5c90a6d03..8ba46dacc 100644
--- a/armi/bookkeeping/db/databaseInterface.py
+++ b/armi/bookkeeping/db/databaseInterface.py
@@ -151,36 +151,35 @@ def interactEveryNode(self, cycle, node):
if self.o.cs["tightCoupling"]:
# h5 cant handle overwriting so we skip here and write once the tight coupling loop has completed
return
- self.writeDBEveryNode(cycle, node)
+ self.writeDBEveryNode()
- def writeDBEveryNode(self, cycle, node):
+ def writeDBEveryNode(self):
"""Write the database at the end of the time node."""
- # skip writing for last burn step since it will be written at interact EOC
- if node < self.o.burnSteps[cycle]:
- self.r.core.p.minutesSinceStart = (
- time.time() - self.r.core.timeOfStart
- ) / 60.0
- self._db.writeToDB(self.r)
- if self.cs[CONF_SYNC_AFTER_WRITE]:
- self._db.syncToSharedFolder()
+ self.r.core.p.minutesSinceStart = (time.time() - self.r.core.timeOfStart) / 60.0
+ self._db.writeToDB(self.r)
+ if self.cs[CONF_SYNC_AFTER_WRITE]:
+ self._db.syncToSharedFolder()
def interactEOC(self, cycle=None):
- """In case anything changed since last cycle (e.g. rxSwing), update DB. (End of Cycle)."""
- # We cannot presume whether we are at EOL based on cycle and cs["nCycles"],
- # since cs["nCycles"] is not a difinitive indicator of EOL; ultimately the
- # Operator has the final say.
- if not self.o.atEOL:
- self.r.core.p.minutesSinceStart = (
- time.time() - self.r.core.timeOfStart
- ) / 60.0
- self._db.writeToDB(self.r)
+ """
+        Don't write the database at EOC; this state isn't important since it's a decay-only step.
+
+        Notes
+        -----
+        The same state is available at the start of the next cycle.
+ """
+ return
def interactEOL(self):
"""DB's should be closed at run's end. (End of Life)."""
# minutesSinceStarts should include as much of the ARMI run as possible so EOL
# is necessary, too.
self.r.core.p.minutesSinceStart = (time.time() - self.r.core.timeOfStart) / 60.0
- self._db.writeToDB(self.r)
+ self._db.writeToDB(self.r, "EOL")
+ self.closeDB()
+
+ def closeDB(self):
+ """Close the DB, writing to file."""
self._db.close(True)
def interactError(self):
@@ -194,7 +193,7 @@ def interactError(self):
# writing
self._db.writeToDB(self.r, "error")
self._db.close(False)
- except: # noqa: bare-except; we're already responding to an error
+ except Exception: # we're already responding to an error
pass
def interactDistributeState(self) -> None:
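Net effect on what gets written: every time node is saved as it completes, EOC no longer duplicates the final node, and EOL lands in its own suffixed group. For a hypothetical one-cycle, two-node run the resulting layout can be inspected directly:

    # inspecting the statepoints of a finished run (file name is hypothetical)
    from armi.bookkeeping.db.database3 import Database3

    db = Database3("armiRun.h5", "r")
    db.open()
    print(sorted(db.h5db.keys()))  # e.g. ['c00n00', 'c00n01', 'c00n01EOL', 'inputs']
    db.close()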
diff --git a/armi/bookkeeping/db/jaggedArray.py b/armi/bookkeeping/db/jaggedArray.py
new file mode 100644
index 000000000..c28b98746
--- /dev/null
+++ b/armi/bookkeeping/db/jaggedArray.py
@@ -0,0 +1,193 @@
+# Copyright 2024 TerraPower, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Tooling to help flatten jagged (non-rectangular) data into rectangular arrays.
+
+The goal is to support writing jagged NumPy array data into ARMI's HDF5 databases.
+"""
+
+from typing import List, Optional
+
+import numpy as np
+
+from armi import runLog
+
+
+class JaggedArray:
+ """
+ Take a list of numpy arrays or lists and flatten them into a single 1D array.
+
+ This implementation can preserve the structure of a multi-dimensional numpy array
+ by storing the dimensions in self.shapes and then re-populating a numpy array of
+ that shape from the flattened 1D array. However, it can only preserve one layer of
+ jaggedness in a list of lists (or other iterables). For example, a list of tuples
+ with varying lengths can be flattened and reconstituted exactly. But, if a list of
+ lists of tuples is passed in, the tuples in that final layer of nesting will all be
+ flattened to a single 1D numpy array after a round trip. No structure is retained
+ from nested lists of jagged lists or tuples.
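+
+    For illustration, a minimal sketch of the intended round trip (the values
+    here are hypothetical)::
+
+        jagged = JaggedArray([np.array([1, 2]), None, np.array([3, 4, 5])], "flux")
+        jagged.unpack()  # [array([1, 2]), None, array([3, 4, 5])]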
+ """
+
+ def __init__(self, jaggedData, paramName):
+ """
+ JaggedArray constructor.
+
+ Parameters
+ ----------
+ jaggedData: list of np.ndarray
+ A list of numpy arrays (or lists or tuples) to be flattened into a single array
+ paramName: str
+ The name of the parameter represented by this data
+ """
+ offset = 0
+ flattenedArray = []
+ offsets = []
+ shapes = []
+ nones = []
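+        # Walk the input once, recording where each flattened chunk starts
+        # (offsets), the original shape of each chunk (shapes), and which
+        # indices held None or an empty iterable (nones).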
+ for i, arr in enumerate(jaggedData):
+ if isinstance(arr, (np.ndarray, list, tuple)):
+ if len(arr) == 0:
+ nones.append(i)
+ else:
+ offsets.append(offset)
+ try:
+ numpyArray = np.array(arr)
+ shapes.append(numpyArray.shape)
+ offset += numpyArray.size
+ flattenedArray.extend(numpyArray.flatten())
+                    except Exception:
+                        # numpy might fail if it's jagged
+                        flattenedList = self.flatten(arr)
+                        shapes.append((len(flattenedList),))
+ offset += len(flattenedList)
+ flattenedArray.extend(flattenedList)
+ elif isinstance(arr, (int, float)):
+ offsets.append(offset)
+ shapes.append((1,))
+ offset += 1
+ flattenedArray.append(arr)
+ elif arr is None:
+ nones.append(i)
+
+ self.flattenedArray = np.array(flattenedArray)
+ self.offsets = np.array(offsets)
+ try:
+ self.shapes = np.array(shapes)
+ except ValueError as ee:
+ runLog.error(
+ "Error! It seems like ARMI may have tried to flatten a jagged array "
+ "where the elements have different numbers of dimensions. `shapes` "
+ "attribute of the JaggedArray for {} cannot be made into a numpy "
+ "array; it might be jagged.".format(paramName)
+ )
+ runLog.error(shapes)
+ raise ValueError(ee)
+ self.nones = np.array(nones)
+ self.dtype = self.flattenedArray.dtype
+ self.paramName = paramName
+
+ def __iter__(self):
+ """Iterate over the unpacked list."""
+ return iter(self.unpack())
+
+ def __contains__(self, other):
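+        """Membership test against the flattened values."""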
+ return other in self.flattenedArray
+
+ @staticmethod
+ def flatten(x):
+ """
+ Recursively flatten an iterable (list, tuple, or numpy.ndarray).
+
+        Parameters
+        ----------
+        x : list, tuple, np.ndarray
+ An iterable. Can be a nested iterable in which the elements
+ themselves are also iterable.
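+
+        For example, a small sketch::
+
+            >>> JaggedArray.flatten([(1, 2), [3, [4, 5]]])
+            [1, 2, 3, 4, 5]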
+ """
+ if isinstance(x, (list, tuple, np.ndarray)):
+ if len(x) == 0:
+ return []
+ first, rest = x[0], x[1:]
+ return JaggedArray.flatten(first) + JaggedArray.flatten(rest)
+ else:
+ return [x]
+
+ @classmethod
+ def fromH5(cls, data, offsets, shapes, nones, dtype, paramName):
+ """
+ Create a JaggedArray instance from an HDF5 dataset.
+
+ The JaggedArray is stored in HDF5 as a flat 1D array with accompanying
+ attributes of "offsets" and "shapes" to define how to reconstitute the
+ original data.
+
+ Parameters
+ ----------
+ data: np.ndarray
+ A flattened 1D numpy array read in from an HDF5 file
+ offsets: np.ndarray
+ Offset indices for the zeroth element of each constituent array
+ shapes: np.ndarray
+ The shape of each constituent array
+ nones: np.ndarray
+ The location of Nones
+ dtype: np.dtype
+ The data type for the array
+ paramName: str
+ The name of the parameter represented by this data
+
+ Returns
+ -------
+        obj: JaggedArray
+            An instance of JaggedArray populated with the input data
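+
+        Examples
+        --------
+        A sketch of reading one jagged parameter back, where ``dset`` is a
+        hypothetical h5py dataset written by this class::
+
+            arr = JaggedArray.fromH5(
+                dset[()],
+                dset.attrs["offsets"],
+                dset.attrs["shapes"],
+                dset.attrs["noneLocations"],
+                dset.dtype,
+                "myParam",
+            )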
+ """
+ obj = cls([], paramName)
+ obj.flattenedArray = np.array(data)
+ obj.offsets = np.array(offsets)
+ obj.shapes = np.array(shapes)
+ obj.nones = np.array(nones)
+ obj.dtype = dtype
+ obj.paramName = paramName
+ return obj
+
+ def tolist(self):
+ """Alias for unpack() to make this class respond like a np.ndarray."""
+ return self.unpack()
+
+ def unpack(self):
+ """
+ Unpack a JaggedArray object into a list of arrays.
+
+ Returns
+ -------
+ unpackedJaggedData: list of np.ndarray
+ List of numpy arrays with varying dimensions (i.e., jagged arrays)
+ """
+ unpackedJaggedData: List[Optional[np.ndarray]] = []
+ shapeIndices = [i for i, x in enumerate(self.shapes) if sum(x) != 0]
+ numElements = len(shapeIndices) + len(self.nones)
+ j = 0 # non-None element counter
+ for i in range(numElements):
+ if i in self.nones:
+ unpackedJaggedData.append(None)
+ else:
+ k = shapeIndices[j]
+ unpackedJaggedData.append(
+ np.ndarray(
+ self.shapes[k],
+ dtype=self.dtype,
+ buffer=self.flattenedArray[self.offsets[k] :],
+ )
+ )
+ j += 1
+
+ return unpackedJaggedData
diff --git a/armi/bookkeeping/db/layout.py b/armi/bookkeeping/db/layout.py
index 4dad92606..c2be71adb 100644
--- a/armi/bookkeeping/db/layout.py
+++ b/armi/bookkeeping/db/layout.py
@@ -34,14 +34,14 @@
List,
)
-import numpy
+import numpy as np
from armi import runLog
+from armi.reactor import grids
+from armi.reactor.assemblyLists import AssemblyList
from armi.reactor.components import Component
from armi.reactor.composites import ArmiObject
-from armi.reactor import grids
from armi.reactor.reactors import Core
-from armi.reactor.assemblyLists import AssemblyList
from armi.reactor.reactors import Reactor
# Here we store the Database3 version information.
@@ -66,29 +66,29 @@
NONE_MAP = {float: float("nan"), str: ""}
NONE_MAP.update(
{
- intType: numpy.iinfo(intType).min + 2
+ intType: np.iinfo(intType).min + 2
for intType in (
int,
- numpy.int8,
- numpy.int16,
- numpy.int32,
- numpy.int64,
+ np.int8,
+ np.int16,
+ np.int32,
+ np.int64,
)
}
)
NONE_MAP.update(
{
- intType: numpy.iinfo(intType).max - 2
+ intType: np.iinfo(intType).max - 2
for intType in (
- numpy.uint,
- numpy.uint8,
- numpy.uint16,
- numpy.uint32,
- numpy.uint64,
+ np.uint,
+ np.uint8,
+ np.uint16,
+ np.uint32,
+ np.uint64,
)
}
)
-NONE_MAP.update({floatType: floatType("nan") for floatType in (float, numpy.float64)})
+NONE_MAP.update({floatType: floatType("nan") for floatType in (float, np.float64)})
class Layout:
@@ -231,7 +231,7 @@ def _createLayout(self, comp):
try:
self.temperatures.append((comp.inputTemperatureInC, comp.temperatureInC))
self.material.append(comp.material.__class__.__name__)
- except: # noqa: bare-except
+ except Exception:
self.temperatures.append((-900, -900)) # an impossible temperature
self.material.append("")
@@ -262,18 +262,18 @@ def _readLayout(self, h5group):
# location is either an index, or a point
# iter over list is faster
locations = h5group["layout/location"][:].tolist()
- self.locationType = numpy.char.decode(
+ self.locationType = np.char.decode(
h5group["layout/locationType"][:]
).tolist()
self.location = _unpackLocations(
self.locationType, locations, self.version[1]
)
- self.type = numpy.char.decode(h5group["layout/type"][:])
- self.name = numpy.char.decode(h5group["layout/name"][:])
+ self.type = np.char.decode(h5group["layout/type"][:])
+ self.name = np.char.decode(h5group["layout/name"][:])
self.serialNum = h5group["layout/serialNum"][:]
self.indexInData = h5group["layout/indexInData"][:]
self.numChildren = h5group["layout/numChildren"][:]
- self.material = numpy.char.decode(h5group["layout/material"][:])
+ self.material = np.char.decode(h5group["layout/material"][:])
self.temperatures = h5group["layout/temperatures"][:]
self.gridIndex = replaceNonsenseWithNones(
h5group["layout/gridIndex"][:], "layout/gridIndex"
@@ -403,12 +403,12 @@ def writeToDB(self, h5group):
try:
h5group.create_dataset(
"layout/type",
- data=numpy.array(self.type).astype("S"),
+ data=np.array(self.type).astype("S"),
compression="gzip",
)
h5group.create_dataset(
"layout/name",
- data=numpy.array(self.name).astype("S"),
+ data=np.array(self.name).astype("S"),
compression="gzip",
)
h5group.create_dataset(
@@ -418,58 +418,79 @@ def writeToDB(self, h5group):
"layout/indexInData", data=self.indexInData, compression="gzip"
)
h5group.create_dataset(
- "layout/numChildren", data=self.numChildren, compression="gzip"
+ "layout/numChildren",
+ data=self.numChildren,
+ compression="gzip",
+ track_order=True,
)
h5group.create_dataset(
- "layout/location", data=self.location, compression="gzip"
+ "layout/location",
+ data=self.location,
+ compression="gzip",
+ track_order=True,
)
h5group.create_dataset(
"layout/locationType",
- data=numpy.array(self.locationType).astype("S"),
+ data=np.array(self.locationType).astype("S"),
compression="gzip",
+ track_order=True,
)
h5group.create_dataset(
"layout/material",
- data=numpy.array(self.material).astype("S"),
+ data=np.array(self.material).astype("S"),
compression="gzip",
+ track_order=True,
)
h5group.create_dataset(
- "layout/temperatures", data=self.temperatures, compression="gzip"
+ "layout/temperatures",
+ data=self.temperatures,
+ compression="gzip",
+ track_order=True,
)
h5group.create_dataset(
"layout/gridIndex",
data=replaceNonesWithNonsense(
- numpy.array(self.gridIndex), "layout/gridIndex"
+ np.array(self.gridIndex), "layout/gridIndex"
),
compression="gzip",
)
- gridsGroup = h5group.create_group("layout/grids")
+ gridsGroup = h5group.create_group("layout/grids", track_order=True)
gridsGroup.attrs["nGrids"] = len(self.gridParams)
gridsGroup.create_dataset(
- "type", data=numpy.array([gp[0] for gp in self.gridParams]).astype("S")
+ "type",
+ data=np.array([gp[0] for gp in self.gridParams]).astype("S"),
+ track_order=True,
)
for igrid, gridParams in enumerate(gp[1] for gp in self.gridParams):
- thisGroup = gridsGroup.create_group(str(igrid))
- thisGroup.create_dataset("unitSteps", data=gridParams.unitSteps)
+ thisGroup = gridsGroup.create_group(str(igrid), track_order=True)
+ thisGroup.create_dataset(
+ "unitSteps", data=gridParams.unitSteps, track_order=True
+ )
for ibound, bound in enumerate(gridParams.bounds):
if bound is not None:
- bound = numpy.array(bound)
- thisGroup.create_dataset("bounds_{}".format(ibound), data=bound)
+ bound = np.array(bound)
+ thisGroup.create_dataset(
+ "bounds_{}".format(ibound), data=bound, track_order=True
+ )
thisGroup.create_dataset(
- "unitStepLimits", data=gridParams.unitStepLimits
+ "unitStepLimits", data=gridParams.unitStepLimits, track_order=True
)
offset = gridParams.offset
thisGroup.attrs["offset"] = offset is not None
if offset is not None:
- thisGroup.create_dataset("offset", data=offset)
- thisGroup.create_dataset("geomType", data=gridParams.geomType)
- thisGroup.create_dataset("symmetry", data=gridParams.symmetry)
+ thisGroup.create_dataset("offset", data=offset, track_order=True)
+ thisGroup.create_dataset(
+ "geomType", data=gridParams.geomType, track_order=True
+ )
+ thisGroup.create_dataset(
+ "symmetry", data=gridParams.symmetry, track_order=True
+ )
except RuntimeError:
runLog.error("Failed to create datasets in: {}".format(h5group))
raise
@@ -719,8 +740,8 @@ def _unpackLocationsV2(locationTypes, locData):
def replaceNonesWithNonsense(
- data: numpy.ndarray, paramName: str, nones: numpy.ndarray = None
-) -> numpy.ndarray:
+ data: np.ndarray, paramName: str, nones: np.ndarray = None
+) -> np.ndarray:
"""
Replace instances of ``None`` with nonsense values that can be detected/recovered
when reading.
@@ -760,7 +781,7 @@ def replaceNonesWithNonsense(
Reverses this operation.
"""
if nones is None:
- nones = numpy.where([d is None for d in data])[0]
+ nones = np.where([d is None for d in data])[0]
try:
# loop to find what the default value should be. This is the first non-None
@@ -770,7 +791,7 @@ def replaceNonesWithNonsense(
val = None
for val in data:
- if isinstance(val, numpy.ndarray):
+ if isinstance(val, np.ndarray):
# if multi-dimensional, val[0] could still be an array, val.flat is
# a flattened iterator, so next(val.flat) gives the first value in
# an n-dimensional array
@@ -779,8 +800,8 @@ def replaceNonesWithNonsense(
if realType is type(None):
continue
- defaultValue = numpy.reshape(
- numpy.repeat(NONE_MAP[realType], val.size), val.shape
+ defaultValue = np.reshape(
+ np.repeat(NONE_MAP[realType], val.size), val.shape
)
break
else:
@@ -797,8 +818,8 @@ def replaceNonesWithNonsense(
realType = float
defaultValue = NONE_MAP[realType]
- if isinstance(val, numpy.ndarray):
- data = numpy.array([d if d is not None else defaultValue for d in data])
+ if isinstance(val, np.ndarray):
+ data = np.array([d if d is not None else defaultValue for d in data])
else:
data[nones] = defaultValue
@@ -816,7 +837,7 @@ def replaceNonesWithNonsense(
try:
data = data.astype(realType)
- except: # noqa: bare-except
+ except Exception:
raise ValueError(
"Could not coerce data for {} to {}, data:\n{}".format(
paramName, realType, data
@@ -833,7 +854,7 @@ def replaceNonesWithNonsense(
return data
-def replaceNonsenseWithNones(data: numpy.ndarray, paramName: str) -> numpy.ndarray:
+def replaceNonsenseWithNones(data: np.ndarray, paramName: str) -> np.ndarray:
"""
Replace special nonsense values with ``None``.
@@ -853,11 +874,11 @@ def replaceNonsenseWithNones(data: numpy.ndarray, paramName: str) -> numpy.ndarr
replaceNonesWithNonsense
"""
# NOTE: This is closely-related to the NONE_MAP.
- if numpy.issubdtype(data.dtype, numpy.floating):
- isNone = numpy.isnan(data)
- elif numpy.issubdtype(data.dtype, numpy.integer):
- isNone = data == numpy.iinfo(data.dtype).min + 2
- elif numpy.issubdtype(data.dtype, numpy.str_):
+ if np.issubdtype(data.dtype, np.floating):
+ isNone = np.isnan(data)
+ elif np.issubdtype(data.dtype, np.integer):
+ isNone = data == np.iinfo(data.dtype).min + 2
+ elif np.issubdtype(data.dtype, np.str_):
isNone = data == ""
else:
raise TypeError(
@@ -865,18 +886,18 @@ def replaceNonsenseWithNones(data: numpy.ndarray, paramName: str) -> numpy.ndarr
)
if data.ndim > 1:
- result = numpy.ndarray(data.shape[0], dtype=numpy.dtype("O"))
+ result = np.ndarray(data.shape[0], dtype=np.dtype("O"))
for i in range(data.shape[0]):
if isNone[i].all():
result[i] = None
elif isNone[i].any():
# This is the meat of the logic to replace "nonsense" with None.
- result[i] = numpy.array(data[i], dtype=numpy.dtype("O"))
+ result[i] = np.array(data[i], dtype=np.dtype("O"))
result[i][isNone[i]] = None
else:
result[i] = data[i]
else:
- result = numpy.ndarray(data.shape, dtype=numpy.dtype("O"))
+ result = np.ndarray(data.shape, dtype=np.dtype("O"))
result[:] = data
result[isNone] = None
diff --git a/armi/bookkeeping/db/tests/test_comparedb3.py b/armi/bookkeeping/db/tests/test_comparedb3.py
index f482e6fa2..a19b94950 100644
--- a/armi/bookkeeping/db/tests/test_comparedb3.py
+++ b/armi/bookkeeping/db/tests/test_comparedb3.py
@@ -97,7 +97,9 @@ def test_compareDatabaseDuplicate(self):
"""End-to-end test of compareDatabases() on a photocopy database."""
# build two super-simple H5 files for testing
o, r = test_reactors.loadTestReactor(
- TEST_ROOT, customSettings={"reloadDBName": "reloadingDB.h5"}
+ TEST_ROOT,
+ customSettings={"reloadDBName": "reloadingDB.h5"},
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml",
)
# create two DBs, identical but for file names
@@ -128,10 +130,12 @@ def test_compareDatabaseSim(self):
"""End-to-end test of compareDatabases() on very simlar databases."""
# build two super-simple H5 files for testing
o, r = test_reactors.loadTestReactor(
- TEST_ROOT, customSettings={"reloadDBName": "reloadingDB.h5"}
+ TEST_ROOT,
+ customSettings={"reloadDBName": "reloadingDB.h5"},
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml",
)
- # create two DBs, identical but for file names
+ # create two DBs, identical but for file names and cycle lengths
dbs = []
for lenCycle in range(1, 3):
# build some test data
@@ -177,7 +181,7 @@ def test_compareDatabaseSim(self):
dbs[1]._fullPath,
timestepCompare=[(0, 0), (0, 1)],
)
- self.assertEqual(len(diffs.diffs), 474)
+ self.assertEqual(len(diffs.diffs), 477)
# Cycle length is only diff (x3)
self.assertEqual(diffs.nDiffs(), 3)
diff --git a/armi/bookkeeping/db/tests/test_database3.py b/armi/bookkeeping/db/tests/test_database3.py
index d7c4eca7d..1c2530deb 100644
--- a/armi/bookkeeping/db/tests/test_database3.py
+++ b/armi/bookkeeping/db/tests/test_database3.py
@@ -12,15 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Database3 class."""
-from distutils.spawn import find_executable
+import shutil
import subprocess
import unittest
import h5py
-import numpy
+import numpy as np
from armi.bookkeeping.db import _getH5File
from armi.bookkeeping.db import database3
+from armi.bookkeeping.db.jaggedArray import JaggedArray
from armi.bookkeeping.db.databaseInterface import DatabaseInterface
from armi.reactor import parameters
from armi.reactor.tests.test_reactors import loadTestReactor, reduceTestReactorRings
@@ -31,14 +32,14 @@
# determine if this is a parallel run, and git is installed
GIT_EXE = None
-if find_executable("git") is not None:
+if shutil.which("git") is not None:
GIT_EXE = "git"
-elif find_executable("git.exe") is not None:
+elif shutil.which("git.exe") is not None:
GIT_EXE = "git.exe"
class TestDatabase3(unittest.TestCase):
- """Tests for the Database3 class."""
+ """Tests for the Database3 class that require a large, complicated reactor."""
def setUp(self):
self.td = TemporaryDirectoryChanger()
@@ -62,78 +63,6 @@ def tearDown(self):
self.stateRetainer.__exit__()
self.td.__exit__(None, None, None)
- def test_writeToDB(self):
- """Test writing to the database.
-
- .. test:: Write a single time step of data to the database.
- :id: T_ARMI_DB_TIME
- :tests: R_ARMI_DB_TIME
- """
- self.r.p.cycle = 0
- self.r.p.timeNode = 0
- self.r.p.cycleLength = 0
-
- # Adding some nonsense in, to test NoDefault params
- self.r.p.availabilityFactor = parameters.NoDefault
-
- # validate that the H5 file gets bigger after the write
- self.assertEqual(list(self.db.h5db.keys()), ["inputs"])
- self.db.writeToDB(self.r)
- self.assertEqual(sorted(self.db.h5db.keys()), ["c00n00", "inputs"])
-
- # check the keys for a single time step
- keys = [
- "Circle",
- "Core",
- "DerivedShape",
- "Helix",
- "HexAssembly",
- "HexBlock",
- "Hexagon",
- "Reactor",
- "SpentFuelPool",
- "layout",
- ]
- self.assertEqual(sorted(self.db.h5db["c00n00"].keys()), sorted(keys))
-
- # validate availabilityFactor did not make it into the H5 file
- rKeys = [
- "maxAssemNum",
- "cycle",
- "cycleLength",
- "flags",
- "serialNum",
- "timeNode",
- ]
- self.assertEqual(
- sorted(self.db.h5db["c00n00"]["Reactor"].keys()), sorted(rKeys)
- )
-
- def test_getH5File(self):
- """
- Get the h5 file for the database, because that file format is language-agnostic.
-
- .. test:: Show the database is H5-formatted.
- :id: T_ARMI_DB_H5
- :tests: R_ARMI_DB_H5
- """
- with self.assertRaises(TypeError):
- _getH5File(None)
-
- h5 = _getH5File(self.db)
- self.assertEqual(type(h5), h5py.File)
-
- def makeHistory(self):
- """Walk the reactor through a few time steps and write them to the db."""
- for cycle, node in ((cycle, node) for cycle in range(2) for node in range(2)):
- self.r.p.cycle = cycle
- self.r.p.timeNode = node
- # something that splitDatabase won't change, so that we can make sure that
- # the right data went to the right new groups/cycles
- self.r.p.cycleLength = cycle
-
- self.db.writeToDB(self.r)
-
def makeShuffleHistory(self):
"""Walk the reactor through a few time steps with some shuffling."""
# Serial numbers *are not stable* (i.e., they can be different between test runs
@@ -174,191 +103,6 @@ def makeShuffleHistory(self):
# add some fake missing parameter data to test allowMissing
self.db.h5db["c00n00/Reactor/missingParam"] = "i don't exist"
- def _compareArrays(self, ref, src):
- """
- Compare two numpy arrays.
-
- Comparing numpy arrays that may have unsavory data (NaNs, Nones, jagged
- data, etc.) is really difficult. For now, convert to a list and compare
- element-by-element.
- """
- self.assertEqual(type(ref), type(src))
- if isinstance(ref, numpy.ndarray):
- ref = ref.tolist()
- src = src.tolist()
-
- for v1, v2 in zip(ref, src):
- # Entries may be None
- if isinstance(v1, numpy.ndarray):
- v1 = v1.tolist()
- if isinstance(v2, numpy.ndarray):
- v2 = v2.tolist()
- self.assertEqual(v1, v2)
-
- def _compareRoundTrip(self, data):
- """Make sure that data is unchanged by packing/unpacking."""
- packed, attrs = database3.packSpecialData(data, "testing")
- roundTrip = database3.unpackSpecialData(packed, attrs, "testing")
- self._compareArrays(data, roundTrip)
-
- def test_prepRestartRun(self):
- """
- This test is based on the armiRun.yaml case that is loaded during the `setUp`
- above. In that cs, `reloadDBName` is set to 'reloadingDB.h5', `startCycle` = 1,
- and `startNode` = 2. The nonexistent 'reloadingDB.h5' must first be
- created here for this test.
-
- .. test:: Runs can be restarted from a snapshot.
- :id: T_ARMI_SNAPSHOT_RESTART
- :tests: R_ARMI_SNAPSHOT_RESTART
- """
- # first successfully call to prepRestartRun
- o, r = loadTestReactor(
- TEST_ROOT, customSettings={"reloadDBName": "reloadingDB.h5"}
- )
- cs = o.cs
- reduceTestReactorRings(r, cs, maxNumRings=3)
-
- ratedPower = cs["power"]
- startCycle = cs["startCycle"]
- startNode = cs["startNode"]
- cyclesSetting = [
- {"step days": [1000, 1000], "power fractions": [1, 1]},
- {"step days": [1000, 1000], "power fractions": [1, 1]},
- {"step days": [1000, 1000], "power fractions": [1, 1]},
- ]
- cycleP, nodeP = getPreviousTimeNode(startCycle, startNode, cs)
- cyclesSetting[cycleP]["power fractions"][nodeP] = 0.5
- numCycles = 2
- numNodes = 2
- cs = cs.modified(
- newSettings={
- "nCycles": numCycles,
- "cycles": cyclesSetting,
- "reloadDBName": "something_fake.h5",
- }
- )
-
- # create a db based on the cs
- dbi = DatabaseInterface(r, cs)
- dbi.initDB(fName="reloadingDB.h5")
- db = dbi.database
-
- # populate the db with some things
- for cycle, node in (
- (cycle, node) for cycle in range(numCycles) for node in range(numNodes)
- ):
- r.p.cycle = cycle
- r.p.timeNode = node
- r.p.cycleLength = sum(cyclesSetting[cycle]["step days"])
- r.core.p.power = ratedPower * cyclesSetting[cycle]["power fractions"][node]
- db.writeToDB(r)
- db.close()
-
- self.dbi.prepRestartRun()
-
- # prove that the reloaded reactor has the correct power
- self.assertEqual(self.o.r.p.cycle, cycleP)
- self.assertEqual(self.o.r.p.timeNode, nodeP)
- self.assertEqual(cyclesSetting[cycleP]["power fractions"][nodeP], 0.5)
- self.assertEqual(
- self.o.r.core.p.power,
- ratedPower * cyclesSetting[cycleP]["power fractions"][nodeP],
- )
-
- # now make the cycle histories clash and confirm that an error is thrown
- cs = cs.modified(
- newSettings={
- "cycles": [
- {"step days": [666, 666], "power fractions": [1, 1]},
- {"step days": [666, 666], "power fractions": [1, 1]},
- {"step days": [666, 666], "power fractions": [1, 1]},
- ],
- }
- )
-
- # create a db based on the cs
- dbi = DatabaseInterface(r, cs)
- dbi.initDB(fName="reloadingDB.h5")
- db = dbi.database
-
- # populate the db with something
- for cycle, node in (
- (cycle, node) for cycle in range(numCycles) for node in range(numNodes)
- ):
- r.p.cycle = cycle
- r.p.timeNode = node
- r.p.cycleLength = 2000
- db.writeToDB(r)
- db.close()
-
- with self.assertRaises(ValueError):
- self.dbi.prepRestartRun()
-
- def test_computeParents(self):
- # The below arrays represent a tree structure like this:
- # 71 -----------------------.
- # | \
- # 12--.-----.------. 72
- # / | \ \ \
- # 22 30 4---. 6 18-.
- # / | | | \ \ / | \
- # 8 17 2 32 52 62 1 9 10
- #
- # This should cover a handful of corner cases
- numChildren = [2, 5, 2, 0, 0, 1, 0, 3, 0, 0, 0, 0, 3, 0, 0, 0, 0]
- serialNums = [71, 12, 22, 8, 17, 30, 2, 4, 32, 53, 62, 6, 18, 1, 9, 10, 72]
-
- expected_1 = [None, 71, 12, 22, 22, 12, 30, 12, 4, 4, 4, 12, 12, 18, 18, 18, 71]
- expected_2 = [
- None,
- None,
- 71,
- 12,
- 12,
- 71,
- 12,
- 71,
- 12,
- 12,
- 12,
- 71,
- 71,
- 12,
- 12,
- 12,
- None,
- ]
- expected_3 = [
- None,
- None,
- None,
- 71,
- 71,
- None,
- 71,
- None,
- 71,
- 71,
- 71,
- None,
- None,
- 71,
- 71,
- 71,
- None,
- ]
-
- self.assertEqual(
- database3.Layout.computeAncestors(serialNums, numChildren), expected_1
- )
- self.assertEqual(
- database3.Layout.computeAncestors(serialNums, numChildren, 2), expected_2
- )
- self.assertEqual(
- database3.Layout.computeAncestors(serialNums, numChildren, 3), expected_3
- )
-
def test_load(self):
"""Load a reactor at different time steps, from the database.
@@ -389,54 +133,180 @@ def test_load(self):
with self.assertRaises(RuntimeError):
self.db.fileName = "whatever.h5"
- def test_loadSortSetting(self):
- self.makeShuffleHistory()
+ def test_loadSortSetting(self):
+ self.makeShuffleHistory()
+
+ # default load, should pass without error
+ r0 = self.db.load(0, 0, allowMissing=True)
+
+ # test that the reactor loads differently, dependent on the setting
+ cs = self.db.loadCS()
+ cs = cs.modified(newSettings={CONF_SORT_REACTOR: False})
+ r1 = self.db.load(0, 0, cs=cs, allowMissing=True)
+
+ # the reactor / core should be the same size
+ self.assertEqual(len(r0), len(r1))
+ self.assertEqual(len(r0.core), len(r1.core))
+
+ def test_history(self):
+ self.makeShuffleHistory()
+
+ grid = self.r.core.spatialGrid
+ testAssem = self.r.core.childrenByLocator[grid[0, 0, 0]]
+ testBlock = testAssem[-1]
+
+ # Test assem
+ hist = self.db.getHistoryByLocation(
+ testAssem, params=["chargeTime", "serialNum"]
+ )
+ expectedSn = {
+ (c, n): self.centralAssemSerialNums[c] for c in range(2) for n in range(2)
+ }
+ self.assertEqual(expectedSn, hist["serialNum"])
+
+ # test block
+ hists = self.db.getHistoriesByLocation(
+ [testBlock], params=["serialNum"], timeSteps=[(0, 0), (1, 0)]
+ )
+ expectedSn = {(c, 0): self.centralTopBlockSerialNums[c] for c in range(2)}
+ self.assertEqual(expectedSn, hists[testBlock]["serialNum"])
+
+        # can't mix blocks and assems, since they are at different distances from the core
+ with self.assertRaises(ValueError):
+ self.db.getHistoriesByLocation([testAssem, testBlock], params=["serialNum"])
+
+        # if the requested time step isn't written, return no content
+ hist = self.dbi.getHistory(
+ self.r.core[0], params=["chargeTime", "serialNum"], byLocation=True
+ )
+ self.assertIn((2, 0), hist["chargeTime"].keys())
+ self.assertEqual(hist["chargeTime"][(2, 0)], 2)
+
+
+class TestDatabase3Smaller(unittest.TestCase):
+ """Tests for the Database3 class, that can use a smaller test reactor."""
+
+ def setUp(self):
+ self.td = TemporaryDirectoryChanger()
+ self.td.__enter__()
+ self.o, self.r = loadTestReactor(
+ TEST_ROOT,
+ customSettings={"reloadDBName": "reloadingDB.h5"},
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml",
+ )
+
+ self.dbi = DatabaseInterface(self.r, self.o.cs)
+ self.dbi.initDB(fName=self._testMethodName + ".h5")
+ self.db: database3.Database3 = self.dbi.database
+ self.stateRetainer = self.r.retainState().__enter__()
+
+ # used to test location-based history. see details below
+ self.centralAssemSerialNums = []
+ self.centralTopBlockSerialNums = []
+
+ def tearDown(self):
+ self.db.close()
+ self.stateRetainer.__exit__()
+ self.td.__exit__(None, None, None)
+
+ def makeHistory(self):
+ """Walk the reactor through a few time steps and write them to the db."""
+ for cycle, node in ((cycle, node) for cycle in range(2) for node in range(2)):
+ self.r.p.cycle = cycle
+ self.r.p.timeNode = node
+ # something that splitDatabase won't change, so that we can make sure that
+ # the right data went to the right new groups/cycles
+ self.r.p.cycleLength = cycle
+
+ self.db.writeToDB(self.r)
+
+ def _compareArrays(self, ref, src):
+ """
+ Compare two numpy arrays.
+
+ Comparing numpy arrays that may have unsavory data (NaNs, Nones, jagged
+ data, etc.) is really difficult. For now, convert to a list and compare
+ element-by-element.
+ """
+ self.assertEqual(type(ref), type(src))
+ if isinstance(ref, np.ndarray):
+ ref = ref.tolist()
+ src = src.tolist()
+
+ for v1, v2 in zip(ref, src):
+ # Entries may be None
+ if isinstance(v1, np.ndarray):
+ v1 = v1.tolist()
+ if isinstance(v2, np.ndarray):
+ v2 = v2.tolist()
+ self.assertEqual(v1, v2)
+
+ def _compareRoundTrip(self, data):
+ """Make sure that data is unchanged by packing/unpacking."""
+ packed, attrs = database3.packSpecialData(data, "testing")
+ roundTrip = database3.unpackSpecialData(packed, attrs, "testing")
+ self._compareArrays(data, roundTrip)
- # default load, should pass without error
- r0 = self.db.load(0, 0, allowMissing=True)
+ def test_writeToDB(self):
+ """Test writing to the database.
- # test that the reactor loads differently, dependent on the setting
- cs = self.db.loadCS()
- cs = cs.modified(newSettings={CONF_SORT_REACTOR: False})
- r1 = self.db.load(0, 0, cs=cs, allowMissing=True)
+ .. test:: Write a single time step of data to the database.
+ :id: T_ARMI_DB_TIME
+ :tests: R_ARMI_DB_TIME
+ """
+ self.r.p.cycle = 0
+ self.r.p.timeNode = 0
+ self.r.p.cycleLength = 0
- # the reactor / core should be the same size
- self.assertEqual(len(r0), len(r1))
- self.assertEqual(len(r0.core), len(r1.core))
+ # Adding some nonsense in, to test NoDefault params
+ self.r.p.availabilityFactor = parameters.NoDefault
- def test_history(self):
- self.makeShuffleHistory()
+ # validate that the H5 file gets bigger after the write
+ self.assertEqual(list(self.db.h5db.keys()), ["inputs"])
+ self.db.writeToDB(self.r)
+ self.assertEqual(sorted(self.db.h5db.keys()), ["c00n00", "inputs"])
- grid = self.r.core.spatialGrid
- testAssem = self.r.core.childrenByLocator[grid[0, 0, 0]]
- testBlock = testAssem[-1]
+ # check the keys for a single time step
+ keys = [
+ "Circle",
+ "Core",
+ "DerivedShape",
+ "Helix",
+ "HexAssembly",
+ "HexBlock",
+ "Hexagon",
+ "Reactor",
+ "SpentFuelPool",
+ "layout",
+ ]
+ self.assertEqual(sorted(self.db.h5db["c00n00"].keys()), sorted(keys))
- # Test assem
- hist = self.db.getHistoryByLocation(
- testAssem, params=["chargeTime", "serialNum"]
+ # validate availabilityFactor did not make it into the H5 file
+ rKeys = [
+ "maxAssemNum",
+ "cycle",
+ "cycleLength",
+ "flags",
+ "serialNum",
+ "timeNode",
+ ]
+ self.assertEqual(
+ sorted(self.db.h5db["c00n00"]["Reactor"].keys()), sorted(rKeys)
)
- expectedSn = {
- (c, n): self.centralAssemSerialNums[c] for c in range(2) for n in range(2)
- }
- self.assertEqual(expectedSn, hist["serialNum"])
- # test block
- hists = self.db.getHistoriesByLocation(
- [testBlock], params=["serialNum"], timeSteps=[(0, 0), (1, 0)]
- )
- expectedSn = {(c, 0): self.centralTopBlockSerialNums[c] for c in range(2)}
- self.assertEqual(expectedSn, hists[testBlock]["serialNum"])
+ def test_getH5File(self):
+ """
+ Get the h5 file for the database, because that file format is language-agnostic.
- # cant mix blocks and assems, since they are different distance from core
- with self.assertRaises(ValueError):
- self.db.getHistoriesByLocation([testAssem, testBlock], params=["serialNum"])
+ .. test:: Show the database is H5-formatted.
+ :id: T_ARMI_DB_H5
+ :tests: R_ARMI_DB_H5
+ """
+ with self.assertRaises(TypeError):
+ _getH5File(None)
- # if requested time step isnt written, return no content
- hist = self.dbi.getHistory(
- self.r.core[0], params=["chargeTime", "serialNum"], byLocation=True
- )
- self.assertIn((2, 0), hist["chargeTime"].keys())
- self.assertEqual(hist["chargeTime"][(2, 0)], 2)
+ h5 = _getH5File(self.db)
+ self.assertEqual(type(h5), h5py.File)
def test_auxData(self):
path = self.db.getAuxiliaryDataPath((2, 0), "test_stuff")
@@ -445,24 +315,19 @@ def test_auxData(self):
with self.assertRaises(KeyError):
self.db.genAuxiliaryData((-1, -1))
- # TODO: This should be expanded.
def test_replaceNones(self):
"""Super basic test that we handle Nones correctly in database read/writes."""
- data3 = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
- data1 = numpy.array([1, 2, 3, 4, 5, 6, 7, 8])
- data1iNones = numpy.array([1, 2, None, 5, 6])
- data1fNones = numpy.array([None, 2.0, None, 5.0, 6.0])
- data2fNones = numpy.array(
- [None, [[1.0, 2.0, 6.0], [2.0, 3.0, 4.0]]], dtype=object
- )
- dataJag = numpy.array(
- [[[1, 2], [3, 4]], [[1, 2, 3], [4, 5, 6], [7, 8, 9]]], dtype=object
- )
- dataJagNones = numpy.array(
- [[[1, 2], [3, 4]], [[1], [1]], [[1, 2, 3], [4, 5, 6], [7, 8, 9]]],
- dtype=object,
- )
- dataDict = numpy.array(
+ data3 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ data1 = np.array([1, 2, 3, 4, 5, 6, 7, 8])
+ data1iNones = np.array([1, 2, None, 5, 6])
+ data1fNones = np.array([None, 2.0, None, 5.0, 6.0])
+ data2fNones = np.array([None, [[1.0, 2.0, 6.0], [2.0, 3.0, 4.0]]], dtype=object)
+ twoByTwo = np.array([[1, 2], [3, 4]])
+ twoByOne = np.array([[1], [None]])
+ threeByThree = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ dataJag = JaggedArray([twoByTwo, threeByThree], "testParam")
+ dataJagNones = JaggedArray([twoByTwo, twoByOne, threeByThree], "testParam")
+ dataDict = np.array(
[{"bar": 2, "baz": 3}, {"foo": 4, "baz": 6}, {"foo": 7, "bar": 8}]
)
self._compareRoundTrip(data3)
@@ -482,12 +347,13 @@ def test_mergeHistory(self):
self.r.p.cycle = 1
self.r.p.timeNode = 0
tnGroup = self.db.getH5Group(self.r)
+ randomText = "this isn't a reference to another dataset"
database3.Database3._writeAttrs(
tnGroup["layout/serialNum"],
tnGroup,
{
- "fakeBigData": numpy.eye(6400),
- "someString": "this isn't a reference to another dataset",
+ "fakeBigData": np.eye(64),
+ "someString": randomText,
},
)
@@ -501,15 +367,15 @@ def test_mergeHistory(self):
# this test is a little bit implementation-specific, but nice to be explicit
self.assertEqual(
- tnGroup["layout/serialNum"].attrs["fakeBigData"],
- "@/c01n00/attrs/0_fakeBigData",
+ tnGroup["layout/serialNum"].attrs["someString"],
+ randomText,
)
# exercise the _resolveAttrs function
attrs = database3.Database3._resolveAttrs(
tnGroup["layout/serialNum"].attrs, tnGroup
)
- self.assertTrue(numpy.array_equal(attrs["fakeBigData"], numpy.eye(6400)))
+ self.assertTrue(np.array_equal(attrs["fakeBigData"], np.eye(64)))
keys = sorted(db2.keys())
self.assertEqual(len(keys), 4)
@@ -649,7 +515,6 @@ def test_readInputsFromDB(self):
# blueprints
self.assertGreater(len(inputs[2]), 100)
- self.assertIn("custom isotopics:", inputs[2])
self.assertIn("blocks:", inputs[2])
def test_deleting(self):
@@ -665,9 +530,167 @@ def test_open(self):
def test_loadCS(self):
cs = self.db.loadCS()
self.assertEqual(cs["numProcessors"], 1)
- self.assertEqual(cs["nCycles"], 6)
+ self.assertEqual(cs["nCycles"], 2)
def test_loadBlueprints(self):
bp = self.db.loadBlueprints()
self.assertIsNone(bp.nuclideFlags)
self.assertEqual(len(bp.assemblies), 0)
+
+ def test_prepRestartRun(self):
+ """
+ This test is based on the armiRun.yaml case that is loaded during the `setUp`
+ above. In that cs, `reloadDBName` is set to 'reloadingDB.h5', `startCycle` = 1,
+ and `startNode` = 2. The nonexistent 'reloadingDB.h5' must first be
+ created here for this test.
+
+ .. test:: Runs can be restarted from a snapshot.
+ :id: T_ARMI_SNAPSHOT_RESTART
+ :tests: R_ARMI_SNAPSHOT_RESTART
+ """
+ # first successfully call to prepRestartRun
+ o, r = loadTestReactor(
+ TEST_ROOT, customSettings={"reloadDBName": "reloadingDB.h5"}
+ )
+ cs = o.cs
+ reduceTestReactorRings(r, cs, maxNumRings=3)
+
+ ratedPower = cs["power"]
+ startCycle = cs["startCycle"]
+ startNode = cs["startNode"]
+ cyclesSetting = [
+ {"step days": [1000, 1000], "power fractions": [1, 1]},
+ {"step days": [1000, 1000], "power fractions": [1, 1]},
+ {"step days": [1000, 1000], "power fractions": [1, 1]},
+ ]
+ cycleP, nodeP = getPreviousTimeNode(startCycle, startNode, cs)
+ cyclesSetting[cycleP]["power fractions"][nodeP] = 0.5
+ numCycles = 2
+ numNodes = 2
+ cs = cs.modified(
+ newSettings={
+ "nCycles": numCycles,
+ "cycles": cyclesSetting,
+ "reloadDBName": "something_fake.h5",
+ }
+ )
+
+ # create a db based on the cs
+ dbi = DatabaseInterface(r, cs)
+ dbi.initDB(fName="reloadingDB.h5")
+ db = dbi.database
+
+ # populate the db with some things
+ for cycle, node in (
+ (cycle, node) for cycle in range(numCycles) for node in range(numNodes)
+ ):
+ r.p.cycle = cycle
+ r.p.timeNode = node
+ r.p.cycleLength = sum(cyclesSetting[cycle]["step days"])
+ r.core.p.power = ratedPower * cyclesSetting[cycle]["power fractions"][node]
+ db.writeToDB(r)
+ db.close()
+
+ self.dbi.prepRestartRun()
+
+ # prove that the reloaded reactor has the correct power
+ self.assertEqual(self.o.r.p.cycle, cycleP)
+ self.assertEqual(self.o.r.p.timeNode, nodeP)
+ self.assertEqual(cyclesSetting[cycleP]["power fractions"][nodeP], 0.5)
+ self.assertEqual(
+ self.o.r.core.p.power,
+ ratedPower * cyclesSetting[cycleP]["power fractions"][nodeP],
+ )
+
+ # now make the cycle histories clash and confirm that an error is thrown
+ cs = cs.modified(
+ newSettings={
+ "cycles": [
+ {"step days": [666, 666], "power fractions": [1, 1]},
+ {"step days": [666, 666], "power fractions": [1, 1]},
+ {"step days": [666, 666], "power fractions": [1, 1]},
+ ],
+ }
+ )
+
+ # create a db based on the cs
+ dbi = DatabaseInterface(r, cs)
+ dbi.initDB(fName="reloadingDB.h5")
+ db = dbi.database
+
+ # populate the db with something
+ for cycle, node in (
+ (cycle, node) for cycle in range(numCycles) for node in range(numNodes)
+ ):
+ r.p.cycle = cycle
+ r.p.timeNode = node
+ r.p.cycleLength = 2000
+ db.writeToDB(r)
+ db.close()
+
+ with self.assertRaises(ValueError):
+ self.dbi.prepRestartRun()
+
+ def test_computeParents(self):
+ # The below arrays represent a tree structure like this:
+ # 71 -----------------------.
+ # | \
+ # 12--.-----.------. 72
+ # / | \ \ \
+ # 22 30 4---. 6 18-.
+ # / | | | \ \ / | \
+ # 8 17 2 32 52 62 1 9 10
+ #
+ # This should cover a handful of corner cases
+ numChildren = [2, 5, 2, 0, 0, 1, 0, 3, 0, 0, 0, 0, 3, 0, 0, 0, 0]
+ serialNums = [71, 12, 22, 8, 17, 30, 2, 4, 32, 53, 62, 6, 18, 1, 9, 10, 72]
+
+ expected_1 = [None, 71, 12, 22, 22, 12, 30, 12, 4, 4, 4, 12, 12, 18, 18, 18, 71]
+ expected_2 = [
+ None,
+ None,
+ 71,
+ 12,
+ 12,
+ 71,
+ 12,
+ 71,
+ 12,
+ 12,
+ 12,
+ 71,
+ 71,
+ 12,
+ 12,
+ 12,
+ None,
+ ]
+ expected_3 = [
+ None,
+ None,
+ None,
+ 71,
+ 71,
+ None,
+ 71,
+ None,
+ 71,
+ 71,
+ 71,
+ None,
+ None,
+ 71,
+ 71,
+ 71,
+ None,
+ ]
+
+ self.assertEqual(
+ database3.Layout.computeAncestors(serialNums, numChildren), expected_1
+ )
+ self.assertEqual(
+ database3.Layout.computeAncestors(serialNums, numChildren, 2), expected_2
+ )
+ self.assertEqual(
+ database3.Layout.computeAncestors(serialNums, numChildren, 3), expected_3
+ )
diff --git a/armi/bookkeeping/db/tests/test_databaseInterface.py b/armi/bookkeeping/db/tests/test_databaseInterface.py
index 342f5345d..b1542dfa7 100644
--- a/armi/bookkeeping/db/tests/test_databaseInterface.py
+++ b/armi/bookkeeping/db/tests/test_databaseInterface.py
@@ -17,7 +17,7 @@
import unittest
import h5py
-import numpy
+import numpy as np
from numpy.testing import assert_allclose, assert_equal
from armi import __version__ as version
@@ -46,7 +46,7 @@ def getSimpleDBOperator(cs):
It's used to make the db unit tests run very quickly.
"""
newSettings = {}
- newSettings[CONF_LOADING_FILE] = "refOneBlockReactor.yaml"
+ newSettings[CONF_LOADING_FILE] = "smallestTestReactor/refOneBlockReactor.yaml"
newSettings["verbosity"] = "important"
newSettings["db"] = True
newSettings["runType"] = "Standard"
@@ -77,13 +77,40 @@ def interactEveryNode(self, cycle, node):
self.action(cycle, node)
+class TestDatabaseInterfaceBOL(unittest.TestCase):
+ """Test the DatabaseInterface class at the BOL."""
+
+ def test_interactBOL(self):
+ """This test is in its own class, because of temporary directory issues."""
+ with directoryChangers.TemporaryDirectoryChanger():
+ self.o, self.r = loadTestReactor(
+ TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
+ self.dbi = DatabaseInterface(self.r, self.o.cs)
+
+ dbName = f"{self._testMethodName}.h5"
+ self.dbi.initDB(fName=dbName)
+ self.db: Database3 = self.dbi.database
+ self.stateRetainer = self.r.retainState().__enter__()
+ self.assertIsNotNone(self.dbi._db)
+ self.dbi.interactBOL()
+ self.dbi.closeDB()
+ self.dbi._db = None
+ self.assertIsNone(self.dbi._db)
+
+ if os.path.exists(dbName):
+ os.remove(dbName)
+
+
class TestDatabaseInterface(unittest.TestCase):
"""Tests for the DatabaseInterface class."""
def setUp(self):
self.td = directoryChangers.TemporaryDirectoryChanger()
self.td.__enter__()
- self.o, self.r = loadTestReactor(TEST_ROOT)
+ self.o, self.r = loadTestReactor(
+ TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
self.dbi = DatabaseInterface(self.r, self.o.cs)
self.dbi.initDB(fName=self._testMethodName + ".h5")
self.db: Database3 = self.dbi.database
@@ -95,30 +122,69 @@ def tearDown(self):
self.td.__exit__(None, None, None)
# test_interactBOL leaves behind some dirt (accessible after db close) that the
# TempDirChanger is not catching
- bolDirt = os.path.join(PROJECT_ROOT, "armiRun.h5")
- if os.path.exists(bolDirt):
- os.remove(bolDirt)
-
- def test_interactEveryNodeReturn(self):
- """Test that the DB is NOT written to if cs["tightCoupling"] = True."""
- self.o.cs["tightCoupling"] = True
- self.dbi.interactEveryNode(0, 0)
- self.assertFalse(self.dbi.database.hasTimeStep(0, 0))
-
- def test_interactBOL(self):
- self.assertIsNotNone(self.dbi._db)
- self.dbi.interactBOL()
-
- self.dbi._db = None
- self.assertIsNone(self.dbi._db)
- self.dbi.interactBOL()
- self.assertIsNotNone(self.dbi._db)
+ bolDirt = [
+ os.path.join(PROJECT_ROOT, "armiRun.h5"),
+ os.path.join(PROJECT_ROOT, "armiRunSmallest.h5"),
+ ]
+ for dirt in bolDirt:
+ if os.path.exists(dirt):
+ os.remove(dirt)
def test_distributable(self):
self.assertEqual(self.dbi.distributable(), 4)
self.dbi.interactDistributeState()
self.assertEqual(self.dbi.distributable(), 4)
+ def test_demonstrateWritingInteractions(self):
+ """Test what nodes are written to the database during the interaction calls."""
+ self.o.cs["burnSteps"] = 2 # make test insensitive to burn steps
+ r = self.r
+
+ # BOC/BOL doesn't write anything
+ r.p.cycle, r.p.timeNode = 0, 0
+ self.assertFalse(self.dbi.database.hasTimeStep(0, 0))
+ self.dbi.interactBOL()
+ self.assertFalse(self.dbi.database.hasTimeStep(0, 0))
+ self.dbi.interactBOC(0)
+ self.assertFalse(self.dbi.database.hasTimeStep(0, 0))
+
+ # but the first time node does
+ self.dbi.interactEveryNode(0, 0)
+ self.assertTrue(self.dbi.database.hasTimeStep(0, 0))
+
+        # EOC 0 shouldn't write; it's written by the last time node
+ r.p.cycle, r.p.timeNode = 0, self.o.cs["burnSteps"]
+ self.assertFalse(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))
+ self.dbi.interactEOC(r.p.cycle)
+ self.assertFalse(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))
+
+        # The last time node of the cycle should write, though
+ self.assertFalse(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))
+ self.dbi.interactEveryNode(r.p.cycle, r.p.timeNode)
+ self.assertTrue(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))
+
+        # EOL should also write, but let's write the last time node first
+ r.p.cycle, r.p.timeNode = self.o.cs["nCycles"] - 1, self.o.cs["burnSteps"]
+ self.assertFalse(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))
+ self.dbi.interactEveryNode(r.p.cycle, r.p.timeNode)
+ self.assertTrue(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))
+
+ # now write EOL
+ self.assertFalse(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode, "EOL"))
+ self.dbi.interactEOL() # this also saves and closes db
+
+ # reopen db to show EOL is written
+ with Database3(self._testMethodName + ".h5", "r") as db:
+ self.assertTrue(db.hasTimeStep(r.p.cycle, r.p.timeNode, "EOL"))
+ # and confirm that last time node is still there/separate
+ self.assertTrue(db.hasTimeStep(r.p.cycle, r.p.timeNode))
+
+ def test_interactEveryNodeReturnTightCoupling(self):
+ """Test that the DB is NOT written to if cs["tightCoupling"] = True."""
+ self.o.cs["tightCoupling"] = True
+ self.dbi.interactEveryNode(0, 0)
+ self.assertFalse(self.dbi.database.hasTimeStep(0, 0))
+
def test_timeNodeLoop_tightCoupling(self):
"""Test that database is written out after the coupling loop has completed."""
# clear out interfaces (no need to run physics) but leave database
@@ -128,6 +194,55 @@ def test_timeNodeLoop_tightCoupling(self):
self.o._timeNodeLoop(0, 0)
self.assertTrue(self.dbi._db.hasTimeStep(0, 0))
+ def test_syncDbAfterWrite(self):
+ """
+        Test to ensure that the fast-path database is copied to the working
+        directory at every time node when ``syncDbAfterWrite`` is ``True``.
+ """
+ r = self.r
+
+ self.o.cs["syncDbAfterWrite"] = True
+ self.o.cs["burnSteps"] = 2 # make test insensitive to burn steps
+
+ self.dbi.interactBOL()
+ self.assertFalse(os.path.exists(self.dbi.database.fileName))
+
+ # Go through a few time nodes to ensure appending is working
+ for timeNode in range(self.o.cs["burnSteps"]):
+ r.p.cycle = 0
+ r.p.timeNode = timeNode
+ self.dbi.interactEveryNode(r.p.cycle, r.p.timeNode)
+
+ # The file should have been copied to working directory
+ self.assertTrue(os.path.exists(self.dbi.database.fileName))
+
+ # The copied file should have the newest time node
+ with Database3(self.dbi.database.fileName, "r") as db:
+ for tn in range(timeNode + 1):
+ self.assertTrue(db.hasTimeStep(r.p.cycle, tn))
+
+ # The in-memory database should have been reloaded properly
+ for tn in range(timeNode + 1):
+ self.assertTrue(self.dbi.database.hasTimeStep(r.p.cycle, tn))
+
+ # Make sure EOL runs smoothly
+ self.dbi.interactEOL()
+ self.assertTrue(os.path.exists(self.dbi.database.fileName))
+
+ def test_noSyncDbAfterWrite(self):
+ """
+        Test to ensure that the fast-path database is NOT copied to the working
+        directory at every time node when ``syncDbAfterWrite`` is ``False``.
+ """
+ self.o.cs["syncDbAfterWrite"] = False
+
+ self.dbi.interactBOL()
+ self.assertFalse(os.path.exists(self.dbi.database.fileName))
+ self.dbi.interactEveryNode(0, 0)
+ self.assertFalse(os.path.exists(self.dbi.database.fileName))
+ self.dbi.interactEOL()
+ self.assertTrue(os.path.exists(self.dbi.database.fileName))
+
class TestDatabaseWriter(unittest.TestCase):
def setUp(self):
@@ -300,7 +415,8 @@ def setUpClass(cls):
# than the original input file. This allows settings to be
# changed in memory like this and survive for testing.
newSettings = {"verbosity": "extra"}
- newSettings["nCycles"] = 2
+ cls.nCycles = 2
+ newSettings["nCycles"] = cls.nCycles
newSettings["burnSteps"] = 2
o, r = loadTestReactor(customSettings=newSettings)
reduceTestReactorRings(r, o.cs, 3)
@@ -314,7 +430,7 @@ def setUpClass(cls):
def writeFlux(cycle, node):
for bi, b in enumerate(o.r.core.getBlocks()):
b.p.flux = 1e6 * bi + cycle * 100 + node
- b.p.mgFlux = numpy.repeat(b.p.flux / 33, 33)
+ b.p.mgFlux = np.repeat(b.p.flux / 33, 33)
o.interfaces.insert(0, MockInterface(o.r, o.cs, writeFlux))
with o:
@@ -402,8 +518,8 @@ def test_readWritten(self):
self.assertEqual(c1.name, c2.name)
if isinstance(c1.spatialLocator, grids.MultiIndexLocation):
assert_equal(
- numpy.array(c1.spatialLocator.indices),
- numpy.array(c2.spatialLocator.indices),
+ np.array(c1.spatialLocator.indices),
+ np.array(c2.spatialLocator.indices),
)
else:
assert_equal(
@@ -440,8 +556,8 @@ def test_variousTypesWork(self):
b1 = self.r.core.getFirstBlock(Flags.FUEL)
b2 = r2.core.getFirstBlock(Flags.FUEL)
- self.assertIsInstance(b1.p.mgFlux, numpy.ndarray)
- self.assertIsInstance(b2.p.mgFlux, numpy.ndarray)
+ self.assertIsInstance(b1.p.mgFlux, np.ndarray)
+ self.assertIsInstance(b2.p.mgFlux, np.ndarray)
assert_allclose(b1, b2)
c1 = b1.getComponent(Flags.FUEL)
@@ -460,6 +576,18 @@ def test_variousTypesWork(self):
assert_allclose(numDensVec1, numDensVec2)
+ def test_timesteps(self):
+ with Database3(self.dbName, "r") as db:
+            # build the expected list of time steps in the DB file
+ timesteps = []
+ for cycle in range(self.nCycles):
+ for bStep in range(3):
+ timesteps.append(f"/c0{cycle}n0{bStep}")
+ timesteps.append("/c01n02EOL")
+
+ # verify the timesteps are correct, including the EOL
+ self.assertEqual(list(db.keys()), timesteps)
+
class TestBadName(unittest.TestCase):
def test_badDBName(self):
diff --git a/armi/bookkeeping/db/tests/test_jaggedArray.py b/armi/bookkeeping/db/tests/test_jaggedArray.py
new file mode 100644
index 000000000..9cc63188a
--- /dev/null
+++ b/armi/bookkeeping/db/tests/test_jaggedArray.py
@@ -0,0 +1,166 @@
+# Copyright 2019 TerraPower, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tests for the JaggedArray class."""
+import unittest
+
+import h5py
+import numpy as np
+
+from armi.bookkeeping.db.jaggedArray import JaggedArray
+from armi.utils.directoryChangers import TemporaryDirectoryChanger
+
+
+class TestJaggedArray(unittest.TestCase):
+ """Tests for the JaggedArray class."""
+
+ def setUp(self):
+ self.td = TemporaryDirectoryChanger()
+ self.td.__enter__()
+
+ def tearDown(self):
+ self.td.__exit__(None, None, None)
+
+ def test_roundTrip(self):
+ """Basic test that we handle Nones correctly in database read/writes."""
+ dataSet = [1, 2.0, None, [], [3, 4], (5, 6, 7), np.array([8, 9, 10, 11])]
+ self._compareRoundTrip(dataSet, "test-numbers")
+
+ def test_roundTripBool(self):
+ """Basic test that we handle Nones correctly in database read/writes."""
+ dataSet = [True, True, [False, True, False]]
+ self._compareRoundTrip(dataSet, "test-bool")
+
+ def test_flatten(self):
+ """Test the recursive flattening static method."""
+ testdata = [(1, 2), [3, 4, 5], [], None, 6, np.array([7, 8, 9])]
+ flatArray = JaggedArray.flatten(testdata)
+ self.assertEqual(flatArray, [1, 2, 3, 4, 5, None, 6, 7, 8, 9])
+
+ def test_backwardsCompatible(self):
+ """
+ Test that the new JaggedArray can unpack the old database jagged data format.
+
+ The "old" database format contains shapes and offsets for locations that have None.
+ The "new" database format only contains shapes and offsets for non-None values.
+ The "new" unpacking routine is able to read either format.
+ """
+ paramName = "test_old"
+ data = [[1, 2], None, [3, 4, 5], None, None, [6, 7, 8, 9]]
+ flattenedArray = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
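+        # the "old" format kept placeholder shape/offset entries for None locations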
+ shapes = [(2,), (0,), (3,), (0,), (0,), (4,)]
+        offsets = [0, 2, 2, 5, 5, 5]
+ nones = [1, 3, 4]
+ h5file = "test_oldFormat.h5"
+ with h5py.File(h5file, "w") as hf:
+ dset = hf.create_dataset(
+ data=flattenedArray,
+ name=paramName,
+ )
+ dset.attrs["jagged"] = True
+ dset.attrs["offsets"] = offsets
+ dset.attrs["shapes"] = shapes
+ dset.attrs["noneLocations"] = nones
+
+ with h5py.File(h5file, "r") as hf:
+ dataset = hf[paramName]
+ values = dataset[()]
+ offsets = dataset.attrs["offsets"]
+ shapes = dataset.attrs["shapes"]
+ nones = dataset.attrs["noneLocations"]
+
+ roundTrip = JaggedArray.fromH5(
+ values,
+ offsets,
+ shapes,
+ nones,
+ dtype=flattenedArray.dtype,
+ paramName=paramName,
+ )
+ self._compareArrays(data, roundTrip)
+
+ def _compareRoundTrip(self, data, paramName):
+ """Make sure that data is unchanged by packing/unpacking."""
+ jaggedArray = JaggedArray(data, paramName)
+
+ # write to HDF5
+ h5file = "test_jaggedArray.h5"
+ with h5py.File(h5file, "w") as hf:
+ dset = hf.create_dataset(
+ data=jaggedArray.flattenedArray,
+ name=jaggedArray.paramName,
+ )
+ dset.attrs["jagged"] = True
+ dset.attrs["offsets"] = jaggedArray.offsets
+ dset.attrs["shapes"] = jaggedArray.shapes
+ dset.attrs["noneLocations"] = jaggedArray.nones
+
+ with h5py.File(h5file, "r") as hf:
+ dataset = hf[paramName]
+ values = dataset[()]
+ offsets = dataset.attrs["offsets"]
+ shapes = dataset.attrs["shapes"]
+ nones = dataset.attrs["noneLocations"]
+
+ roundTrip = JaggedArray.fromH5(
+ values,
+ offsets,
+ shapes,
+ nones,
+ dtype=jaggedArray.flattenedArray.dtype,
+ paramName=paramName,
+ )
+ self._compareArrays(data, roundTrip)
+
+ def _compareArrays(self, ref, src):
+ """
+ Compare two numpy arrays.
+
+ Comparing numpy arrays that may have unsavory data (NaNs, Nones, jagged
+ data, etc.) is really difficult. For now, convert to a list and compare
+ element-by-element.
+
+ Several types of data do not survive a round trip. The if-elif branch
+ here converts the initial data into the format expected to be produced
+ by the round trip. The conversions are:
+
+ - For scalar values (int, float, etc.), the data becomes a numpy
+ array with a dimension of 1 after the round trip.
+ - Tuples and lists become numpy arrays
+ - Empty lists become `None`
+
+ """
+ if isinstance(ref, np.ndarray):
+ ref = ref.tolist()
+ src = src.tolist()
+
+ for v1, v2 in zip(ref, src):
+ # Entries may be None
+ if isinstance(v1, np.ndarray):
+ v1 = v1.tolist()
+ elif isinstance(v1, tuple):
+ v1 = list(v1)
+ elif isinstance(v1, int):
+ v1 = np.array([v1])
+ elif isinstance(v1, float):
+ v1 = np.array([v1], dtype=np.float64)
+ elif v1 is None:
+ pass
+ elif len(v1) == 0:
+ v1 = None
+
+ if isinstance(v2, np.ndarray):
+ v2 = v2.tolist()
+
+ self.assertEqual(v1, v2)
diff --git a/armi/bookkeeping/db/v110_to_v120.py b/armi/bookkeeping/db/v110_to_v120.py
new file mode 100644
index 000000000..2df041736
--- /dev/null
+++ b/armi/bookkeeping/db/v110_to_v120.py
@@ -0,0 +1,54 @@
+"""
+Nala Database Migration Script.
+
+From Nala v1.1.0 to v1.2.0
+
+Migration
+---------
+1. Fixes a corner case where Nala v1.2.0 cannot read a dataset from a v1.1.0
+ database if that dataset has a combination of empty arrays (i.e., []) and
+   `None`.
+
+The migration just sets all of the empty arrays to be None. This shouldn't
+affect any downstream applications loading the data; an app shouldn't care
+whether data is [] or None.
+"""
+
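+# A sketch of the expected invocation (the paths here are hypothetical):
+#
+#     python v110_to_v120.py /path/to/my-run.h5
+#
+# This writes ./my-run-v1.2.0.h5 with corrected "noneLocations" attributes.
+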
+import os
+import shutil
+import sys
+
+import numpy as np
+import h5py
+
+
+def migrate110to120():
+ # copy old DB to new location
+ databaseFile = str(sys.argv[1])
+ if databaseFile.endswith(".h5"):
+ databaseFile = databaseFile[:-3]
+ baseName = os.path.basename(databaseFile)
+ newDatabaseFile = os.path.join(os.getcwd(), baseName + "-v1.2.0.h5")
+ shutil.copy(databaseFile + ".h5", newDatabaseFile)
+
+ # migration logic: write to new file
+ with h5py.File(newDatabaseFile, "r+") as f:
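+        # time-step groups are named like "c00n00"; this filter assumes no
+        # other top-level group name contains the letter "c"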
+ statepoints = [k for k in f.keys() if "c" in k]
+
+ # look for datasets with combinations of Nones and empty arrays
+ for state in statepoints:
+ blockData = f[state]["HexBlock"]
+ for paramName in blockData:
+ if blockData[paramName].attrs.get("jagged", False):
+ # make any location with a null shape a "None"
+ shapes = blockData[paramName].attrs.get("shapes")
+ noneLocs = []
+ for i, s in enumerate(shapes):
+ if sum(s) == 0:
+ noneLocs.append(i)
+ blockData[paramName].attrs["noneLocations"] = np.array(noneLocs)
+
+
+if __name__ == "__main__":
+ migrate110to120()
diff --git a/armi/bookkeeping/historyTracker.py b/armi/bookkeeping/historyTracker.py
index bfd52d4bc..995bbb5ef 100644
--- a/armi/bookkeeping/historyTracker.py
+++ b/armi/bookkeeping/historyTracker.py
@@ -16,14 +16,12 @@
The History Tracker is a bookkeeping interface that accesses and reports time-dependent state
information from the database.
-At the end of a run, these write text files to
-show the histories for various follow-on mechanical analysis,
-fuel performance analysis, etc.
+At the end of a run, these write text files to show the histories for various follow-on mechanical
+analysis, fuel performance analysis, etc.
-Other interfaces may find this useful as well, to get an assembly history
-for fuel performance analysis, etc. This is particularly useful in equilibrium runs,
-where the ``EqHistoryTrackerInterface`` will unravel the full history from a single
-equilibrium cycle.
+Other interfaces may find this useful as well, to get an assembly history for fuel performance
+analysis, etc. This is particularly useful in equilibrium runs, where the
+``EqHistoryTrackerInterface`` will unravel the full history from a single equilibrium cycle.
Getting history information
---------------------------
@@ -37,20 +35,21 @@
history.preloadBlockHistoryVals(blockNames, historyKeys, timeSteps)
-This is essential for performance when history information is going to be accessed
-in loops over assemblies or blocks. Reading each param directly from the database
-individually in loops is paralyzingly slow.
+This is essential for performance when history information is going to be accessed in loops over
+assemblies or blocks. Reading each param directly from the database individually in loops is
+paralyzingly slow.
Specifying parameters to add to the EOL history report
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-To add state parameters to the list of things that get their history reported, you need to define an interface
-method called `getHistoryParams`. It should return a list of block parameters that will become available. For example::
+To add state parameters to the list of things that get their history reported, you need to define an
+interface method called `getHistoryParams`. It should return a list of block parameters that will
+become available. For example::
def getHistoryParams(self):
return ['flux', 'percentBu']
-When you'd like to access history information, you need to grab the history interface. The history interfaces is
-present by default in your interface stack. To get it, just call::
+When you'd like to access history information, you need to grab the history interface. The history
+interface is present by default in your interface stack. To get it, just call::
history = self.getInterface('history')
@@ -71,13 +70,12 @@ def getHistoryParams(self):
from typing import Tuple
import traceback
-import tabulate
-
from armi import interfaces
from armi import runLog
from armi import operators
from armi.reactor.flags import Flags
from armi.reactor import grids
+from armi.utils import tabulate
ORDER = 2 * interfaces.STACK_ORDER.BEFORE + interfaces.STACK_ORDER.BOOKKEEPING
@@ -129,11 +127,10 @@ def __init__(self, r, cs):
Warning
-------
- If the current timestep history is requested and the database has not yet
- been written this timestep, the current value of the requested parameter is
- provided. It is possible that this is not the value that will be written to
- the database during this time step since many interfaces that change
- parameters may interact between this call and the database write.
+ If the current timestep history is requested and the database has not yet been written this
+ timestep, the current value of the requested parameter is provided. It is possible that this
+ is not the value that will be written to the database during this time step since many
+ interfaces that change parameters may interact between this call and the database write.
"""
interfaces.Interface.__init__(self, r, cs)
self.detailAssemblyNames = []
@@ -296,10 +293,10 @@ def writeAssemHistory(self, a, fName=""):
headers = [str(ts).replace(" ", "") for ts in times.keys()]
out.write(
tabulate.tabulate(
+ data=(times.values(),),
headers=headers,
- tabular_data=(times.values(),),
- tablefmt="plain",
- floatfmt="11.5E",
+ tableFmt="plain",
+ floatFmt="11.5E",
)
)
out.write("\n")
@@ -318,7 +315,7 @@ def writeAssemHistory(self, a, fName=""):
out.write("\n\nkey: {0}\n".format(param))
data = [blockHistories[b][param].values() for b in blocks]
- out.write(tabulate.tabulate(data, tablefmt="plain", floatfmt="11.5E"))
+ out.write(tabulate.tabulate(data, tableFmt="plain", floatFmt="11.5E"))
out.write("\n")
# loc is a tuple, remove the spaces from the string representation so it is easy to load
@@ -328,14 +325,14 @@ def writeAssemHistory(self, a, fName=""):
for loc in dbi.getHistory(a, ["location"])["location"].values()
]
out.write("\n\nkey: location\n")
- out.write(tabulate.tabulate((location,), tablefmt="plain"))
+ out.write(tabulate.tabulate((location,), tableFmt="plain"))
out.write("\n\n\n")
headers = "EOL bottom top center".split()
data = [("", b.p.zbottom, b.p.ztop, b.p.z) for b in blocks]
out.write(
tabulate.tabulate(
- data, headers=headers, tablefmt="plain", floatfmt="10.3f"
+ data, headers=headers, tableFmt="plain", floatFmt="10.3f"
)
)
@@ -364,7 +361,7 @@ def preloadBlockHistoryVals(self, names, keys, timesteps):
keys = [key for key in keys if key != "loc"]
data = dbi.getHistories(blocks, keys, timesteps)
self._preloadedBlockHistory = data
- except: # noqa: bare-except
+ except Exception:
# fails during the beginning of standard runs, but that's ok
runLog.info(
"Unable to pre-load block history values due to error:"
diff --git a/armi/bookkeeping/mainInterface.py b/armi/bookkeeping/mainInterface.py
index 8bc93e1bf..226249a22 100644
--- a/armi/bookkeeping/mainInterface.py
+++ b/armi/bookkeeping/mainInterface.py
@@ -28,6 +28,8 @@
from armi import runLog
from armi import utils
from armi.bookkeeping.db.database3 import Database3
+from armi.settings.fwSettings.globalSettings import CONF_COPY_FILES_FROM
+from armi.settings.fwSettings.globalSettings import CONF_COPY_FILES_TO
from armi.utils import pathTools
from armi.utils.customExceptions import InputError
@@ -46,28 +48,26 @@ class MainInterface(interfaces.Interface):
Notes
-----
- Interacts early so that the database is accessible as soon as possible in the run.
- The database interfaces interacts near the end of the interface stack, but the main
- interface interacts first.
+ Interacts early so that the database is accessible as soon as possible in the run. The database
+ interface runs near the end of the interface stack, but the main interface interacts first.
"""
name = "main"
def interactBOL(self):
interfaces.Interface.interactBOL(self)
- self._activateDB()
+ self._activateDBPrepRestart()
self._moveFiles()
- def _activateDB(self):
+ def _activateDBPrepRestart(self):
"""
- Instantiate the database state.
+ Instantiate the database state, and add previous time nodes for restart run.
Notes
-----
- This happens here rather than on the database interface, as the database
- interacts near the end of the stack. Some interactBOL methods may be
- dependent on having data in the database, such as calls to history tracker
- during a restart run.
+ This happens here rather than on the database interface, as the database interacts near the
+ end of the stack. Some interactBOL methods may be dependent on having data in the database,
+ such as calls to history tracker during a restart run.
"""
dbi = self.o.getInterface("database")
if not dbi.enabled():
@@ -78,32 +78,64 @@ def _activateDB(self):
and self.cs["runType"] != operators.RunTypes.SNAPSHOTS
):
# load case before going forward with normal cycle
- runLog.important("MainInterface loading from DB")
+ runLog.important("MainInterface loading DB history for restart.")
- # Load the database from the point just before start cycle and start node
- # as the run will continue at the begining of start cycle and start node,
- # and the database contains the values from the run at the end of the
- # interface stack, which are what the start start cycle and start node
- # should begin with.
+ # Load the database from the point just before start cycle and start node as the run
+ # will continue at the beginning of start cycle and start node, and the database contains
+ # the values from the run at the end of the interface stack, which are what the
+ # start cycle and start node should begin with.
- # NOTE: this should be the responsibility of the database, but cannot
- # because the Database is last in the stack and the MainInterface is
- # first
+ # NOTE: this should be the responsibility of the database, but cannot because the
+ # database is last in the stack and the MainInterface is first
dbi.prepRestartRun()
+
+ if self.cs["startNode"] == 0:
+ # DB interface loaded the previous time step (last time node of previous cycle), but
+ # this is BEFORE the EOC interactions have happened, so here we explicitly
+ # call the EOC interactions now and then proceed with normal BOL interactions
+ # for the cycle we are starting.
+ runLog.important(
+ "MainInterface calling `o.interactAllEOC` due to "
+ "loading the last time node of the previous cycle."
+ )
+ self.o.interactAllEOC(self.r.p.cycle)
+
+ # advance time, since we loaded the previous time step
self.r.p.cycle = self.cs["startCycle"]
self.r.p.timeNode = self.cs["startNode"]
def _moveFiles(self):
- # check for orificed flow bounds files. These will often be named based on the
- # case that this one is dependent upon, but not always. For example, testSassys
- # is dependent on the safety case but requires physics bounds files. now copy
- # the files over
+ """
+ At the start of each run, arbitrary lists of user-defined files can be copied around.
+
+ This logic is controlled by the settings ``copyFilesFrom`` & ``copyFilesTo``.
+
+ ``copyFilesFrom`` :
+
+ - List of files to copy (cannot be directories).
+ - Can be of length zero (that just means no files will be copied).
+ - The file names listed can use the ``*`` glob syntax to reference multiple files.
+
+ ``copyFilesTo`` :
+
+ - List of directories to copy the files into.
+ - Can be of length zero; all files will be copied to the local dir.
+ - Can be of length one; all files will be copied to that dir.
+ - Otherwise, this list must be the same length as the "from" list.
+
+ Notes
+ -----
+ If a provided "from" file is missing, this method will silently pass over it. It only
+ checks, at the end, that the lengths of the "from" and "to" lists are compatible.
+ """
+ # handle a lot of asterisks and missing files
copyFilesFrom = [
filePath
- for possiblePat in self.cs["copyFilesFrom"]
- for filePath in glob.glob(possiblePat)
+ for possiblePath in self.cs[CONF_COPY_FILES_FROM]
+ for filePath in glob.glob(possiblePath)
]
- copyFilesTo = self.cs["copyFilesTo"]
+ copyFilesTo = self.cs[CONF_COPY_FILES_TO]
if len(copyFilesTo) in (len(copyFilesFrom), 0, 1):
# if any files to copy, then use the first as the default, i.e. len() == 1,
@@ -112,15 +144,17 @@ def _moveFiles(self):
for filename, dest in itertools.zip_longest(
copyFilesFrom, copyFilesTo, fillvalue=default
):
- pathTools.copyOrWarn("copyFilesFrom", filename, dest)
+ pathTools.copyOrWarn(CONF_COPY_FILES_FROM, filename, dest)
else:
runLog.error(
- "cs['copyFilesTo'] must either be length 1, 0, or have the same number of entries as "
- "cs['copyFilesFrom']. Actual values:\n"
- " copyFilesTo : {}\n"
- " copyFilesFrom : {}".format(copyFilesTo, copyFilesFrom)
+ f"cs['{CONF_COPY_FILES_TO}'] must either be length 0, 1, or have the same number "
+ f"of entries as cs['{CONF_COPY_FILES_FROM}']. Actual values:\n"
+ f" {CONF_COPY_FILES_TO} : {copyFilesTo}\n"
+ f" {CONF_COPY_FILES_FROM} : {copyFilesFrom}"
+ )
+ raise InputError(
+ f"Failed to process {CONF_COPY_FILES_FROM}/{CONF_COPY_FILES_TO}"
)
- raise InputError("Failed to process copyFilesTo/copyFilesFrom")
def interactBOC(self, cycle=None):
"""Typically the first interface to interact beginning of cycle."""
diff --git a/armi/bookkeeping/memoryProfiler.py b/armi/bookkeeping/memoryProfiler.py
index bde5561f3..b11d26704 100644
--- a/armi/bookkeeping/memoryProfiler.py
+++ b/armi/bookkeeping/memoryProfiler.py
@@ -39,18 +39,18 @@
from typing import Optional
import gc
import sys
-import tabulate
from armi import context
from armi import interfaces
from armi import mpiActions
from armi import runLog
from armi.reactor.composites import ArmiObject
+from armi.utils import tabulate
try:
+ # psutil is an optional requirement, since it doesn't support MacOS very well
import psutil
- # psutil is an optional requirement, since it doesnt support MacOS very well
_havePsutil = True
except ImportError:
runLog.warning(
@@ -340,14 +340,19 @@ def __init__(self):
# directly by the standard operator and reports, so easier said than done.
self.percentNodeRamUsed: Optional[float] = None
self.processMemoryInMB: Optional[float] = None
+ self.processVirtualMemoryInMB: Optional[float] = None
if _havePsutil:
self.percentNodeRamUsed = psutil.virtual_memory().percent
- self.processMemoryInMB = psutil.Process().memory_info().rss / (1012.0**2)
+ self.processMemoryInMB = psutil.Process().memory_info().rss / (1024.0**2)
+ self.processVirtualMemoryInMB = psutil.Process().memory_info().vms / (
+ 1024.0**2
+ )
def __isub__(self, other):
if self.percentNodeRamUsed is not None and other.percentNodeRamUsed is not None:
self.percentNodeRamUsed -= other.percentNodeRamUsed
self.processMemoryInMB -= other.processMemoryInMB
+ self.processVirtualMemoryInMB -= other.processVirtualMemoryInMB
return self
@@ -429,6 +434,6 @@ def printUsage(self, description=None):
"Average System RAM Usage",
"Processor Memory Usage (MB)",
],
- tablefmt="armi",
+ tableFmt="armi",
)
)
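
A short sketch of the RSS/VMS bookkeeping added above (requires the optional psutil package):

import psutil

memInfo = psutil.Process().memory_info()
processMemoryInMB = memInfo.rss / (1024.0**2)  # resident set size
processVirtualMemoryInMB = memInfo.vms / (1024.0**2)  # virtual memory size
percentNodeRamUsed = psutil.virtual_memory().percent
print(f"RSS: {processMemoryInMB:.1f} MB, VMS: {processVirtualMemoryInMB:.1f} MB")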
diff --git a/armi/bookkeeping/report/__init__.py b/armi/bookkeeping/report/__init__.py
index efc47447c..318ea591b 100644
--- a/armi/bookkeeping/report/__init__.py
+++ b/armi/bookkeeping/report/__init__.py
@@ -27,7 +27,6 @@ def setData(name, value, group=None, reports=None):
Any value desired.
group : data.Group
reports : data.Report
-
"""
from armi.bookkeeping.report.reportInterface import ReportInterface
diff --git a/armi/bookkeeping/report/data.py b/armi/bookkeeping/report/data.py
index 3c3f40f9d..d68558d9b 100644
--- a/armi/bookkeeping/report/data.py
+++ b/armi/bookkeeping/report/data.py
@@ -149,8 +149,7 @@ def writeGroupsHTML(self, f):
class Group:
"""Abstract class, when extended is used for storage for data within a report.
- Only accepts things wrapped in the ReportDatum class
-
+ Only accepts things wrapped in the ReportDatum class.
"""
def __init__(self, title, description=""):
@@ -193,6 +192,10 @@ def __init__(self, title, description="", header=None):
def __str__(self):
"""Truer to content representation."""
+ # error handling
+ if not len(self.data):
+ return ""
+
# set up
prototypical_data = list(self.data.values())[0]
num_cols = len(prototypical_data) + 1
diff --git a/armi/bookkeeping/report/html.py b/armi/bookkeeping/report/html.py
index 696f38b14..d3be9a8f0 100644
--- a/armi/bookkeeping/report/html.py
+++ b/armi/bookkeeping/report/html.py
@@ -170,9 +170,6 @@ class Link(Tag):
tag = "link"
-# ---------------------------
-
-
def encode64(file_path):
"""Return the embedded HTML src attribute for an image in base64."""
xtn = os.path.splitext(file_path)[1][1:] # [1:] to cut out the period
@@ -180,12 +177,14 @@ def encode64(file_path):
from armi import runLog
runLog.warning(
- "'.pdf' images cannot be embedded into this HTML report. {} will not be inserted.".format(
- file_path
+ (
+ f"'.pdf' images cannot be embedded into this HTML report. {file_path} will not be"
+ " inserted."
)
)
- return "Faulty PDF image inclusion: {} attempted to be inserted but no support is currently offered for such.".format(
- file_path
+ return (
+ f"Faulty PDF image inclusion: {file_path} attempted to be inserted but no support is "
+ "currently offered for such."
)
with open(file_path, "rb") as img_src:
return r"data:image/{};base64,{}".format(
@@ -193,9 +192,6 @@ def encode64(file_path):
)
-# ---------------------------
-
-
def writeStandardReportTemplate(f, report, caseTitle=""):
f.write(r"" + "\n")
with Html(f):
@@ -220,17 +216,7 @@ def writeStandardReportTemplate(f, report, caseTitle=""):
with Span(
f, attrs={"class": "navbar-text navbar-version pull-left"}
):
- with Img(
- f,
- attrs={
- "src": encode64(
- os.path.join(
- context.RES, "images", "armiicon.ico"
- )
- )
- },
- ):
- pass
+ pass
with A(
f,
diff --git a/armi/bookkeeping/report/newReportUtils.py b/armi/bookkeeping/report/newReportUtils.py
index 9b8bf0554..a4fd7f68b 100644
--- a/armi/bookkeeping/report/newReportUtils.py
+++ b/armi/bookkeeping/report/newReportUtils.py
@@ -13,7 +13,7 @@
# limitations under the License.
import collections
import os
-import numpy
+import numpy as np
from armi import runLog
from armi.bookkeeping.report import newReports
@@ -486,7 +486,7 @@ def getPinDesignTable(core):
designInfo["zrFrac"].append(fuel.getMassFrac("ZR"))
# assumption made that all lists contain only numerical data
- designInfo = {key: numpy.average(data) for key, data in designInfo.items()}
+ designInfo = {key: np.average(data) for key, data in designInfo.items()}
dimensionless = {"sd", "hot sd", "zrFrac", "nPins"}
for key, average_value in designInfo.items():
dim = "{0:10s}".format(key)
@@ -617,7 +617,7 @@ def createDimensionReport(comp):
def insertCoreAndAssemblyMaps(
r, cs, report, blueprint, generateFullCoreMap=False, showBlockAxMesh=True
):
- r"""Create core and assembly design plots.
+ """Create core and assembly design plots.
Parameters
----------
@@ -700,7 +700,6 @@ def insertCoreAndAssemblyMaps(
fontSize=8,
)
- plotting.close()
report[DESIGN][CORE_MAP] = newReports.Image(
"Map of the Core at BOL", os.path.abspath(fName)
)
diff --git a/armi/bookkeeping/report/newReports.py b/armi/bookkeeping/report/newReports.py
index 5043f6ffc..ab6f214f1 100644
--- a/armi/bookkeeping/report/newReports.py
+++ b/armi/bookkeeping/report/newReports.py
@@ -195,12 +195,13 @@ def render(self, level, idPrefix):
Parameters
----------
level : int
- level of the nesting for this section, determines the size of the heading title for the Section
- (The higher the level, the smaller the title font-size). Ranges from H1 - H4 in html terms.
+ level of the nesting for this section; determines the size of the heading title for the
+ Section (the higher the level, the smaller the title font-size). Ranges from H1 to H4 in
+ HTML terms.
idPrefix : String
- used for href/id referencing for the left hand side table of contents to be paired with the item
- that render() is called upon.
+ Used for href/id referencing for the left hand side table of contents to be paired with
+ the item that render() is called upon.
Returns
-------
@@ -440,10 +441,10 @@ class TimeSeries(ReportNode):
Example
-------
- >>> series = TimeSeries("Plot of K-effective", "plot", ["k-effective"], "k-eff", "keff.png") # Adding to a plot with k-effective
+ >>> series = TimeSeries("Plot of K-effective", "plot", ["k-effective"], "k-eff", "keff.png")
>>> time = r.p.time # The current time node of the reactor.
>>> data = r.core.p.keff # The parameter k-effective value at that time.
- >>> uncertainty = r.core.p.keffUnc # Since the parameter yields keff-uncontrolled value at the current time.
+ >>> uncertainty = r.core.p.keffUnc # The keff-uncontrolled at the current time.
>>> series.add("k-effective", time, data, uncertainty) # Adds this point to be plotted later.
>>> # Adding to a plot with multiple lines for fuel Burn-Up Plot.
diff --git a/armi/bookkeeping/report/reportingUtils.py b/armi/bookkeeping/report/reportingUtils.py
index 3a0cfcf3b..ce460d244 100644
--- a/armi/bookkeeping/report/reportingUtils.py
+++ b/armi/bookkeeping/report/reportingUtils.py
@@ -23,11 +23,10 @@
import re
import subprocess
import sys
-import tabulate
import textwrap
import time
-import numpy
+import numpy as np
from armi import context
from armi import interfaces
@@ -39,6 +38,7 @@
from armi.utils import getFileSHA1Hash
from armi.utils import iterables
from armi.utils import plotting
+from armi.utils import tabulate
from armi.utils import textProcessors
from armi.utils import units
@@ -78,13 +78,13 @@ def _writeCaseInformation(o, cs):
(Operator_ArmiCodebase, context.ROOT),
(Operator_WorkingDirectory, os.getcwd()),
(Operator_PythonInterperter, sys.version),
- (Operator_MasterMachine, os.environ.get("COMPUTERNAME", "?")),
+ (Operator_MasterMachine, getNodeName()),
(Operator_NumProcessors, context.MPI_SIZE),
(Operator_Date, context.START_TIME),
]
runLog.header("=========== Case Information ===========")
- runLog.info(tabulate.tabulate(caseInfo, tablefmt="armi"))
+ runLog.info(tabulate.tabulate(caseInfo, tableFmt="armi"))
def _listInputFiles(cs):
"""
@@ -170,7 +170,7 @@ def _writeInputFileInformation(cs):
tabulate.tabulate(
inputFileData,
headers=["Input Type", "Path", "SHA-1 Hash"],
- tablefmt="armi",
+ tableFmt="armi",
)
)
@@ -191,24 +191,18 @@ def _writeMachineInformation():
nodeMappingData.append(
(uniqueName, numProcessors, ", ".join(matchingProcs))
)
- # If this is on Windows: run sys info on each unique node too
- if "win" in sys.platform:
- sysInfoCmd = (
- 'systeminfo | findstr /B /C:"OS Name" /B /C:"OS Version" /B '
- '/C:"Processor" && systeminfo | findstr /E /C:"Mhz"'
- )
- out = subprocess.run(
- sysInfoCmd, capture_output=True, text=True, shell=True
- )
- sysInfo += out.stdout
+
+ sysInfo += getSystemInfo()
+
runLog.header("=========== Machine Information ===========")
runLog.info(
tabulate.tabulate(
nodeMappingData,
headers=["Machine", "Number of Processors", "Ranks"],
- tablefmt="armi",
+ tableFmt="armi",
)
)
+
if sysInfo:
runLog.header("=========== System Information ===========")
runLog.info(sysInfo)
@@ -230,7 +224,7 @@ def _writeReactorCycleInformation(o, cs):
paramStr = [str(p) for p in param]
operatingData.append((name, textwrap.fill(", ".join(paramStr))))
runLog.header("=========== Reactor Cycle Information ===========")
- runLog.info(tabulate.tabulate(operatingData, tablefmt="armi"))
+ runLog.info(tabulate.tabulate(operatingData, tableFmt="armi"))
if context.MPI_RANK > 0:
return # prevent the worker nodes from printing the same thing
@@ -241,6 +235,175 @@ def _writeReactorCycleInformation(o, cs):
_writeReactorCycleInformation(o, cs)
+def getNodeName():
+ """Get the name of this compute node.
+
+ First, look in context.py. Then try various Linux tools. Then try Windows commands.
+
+ Returns
+ -------
+ str
+ Compute node name.
+ """
+ hostNames = [
+ context.MPI_NODENAME,
+ context.MPI_NODENAMES[0],
+ subprocess.run("hostname", capture_output=True, text=True, shell=True).stdout,
+ subprocess.run("uname -n", capture_output=True, text=True, shell=True).stdout,
+ os.environ.get("COMPUTERNAME", context.LOCAL),
+ ]
+ for nodeName in hostNames:
+ if nodeName and nodeName != context.LOCAL:
+ return nodeName
+
+ return context.LOCAL
+
+
+def _getSystemInfoWindows():
+ """Get system information, assuming the system is Windows.
+
+ Returns
+ -------
+ str
+ Basic system information: OS name, OS version, basic processor information
+
+ Examples
+ --------
+ Example results:
+
+ OS Name: Microsoft Windows 10 Enterprise
+ OS Version: 10.0.19041 N/A Build 19041
+ Processor(s): 1 Processor(s) Installed.
+ [01]: Intel64 Family 6 Model 142 Stepping 12 GenuineIntel ~801 Mhz
+ """
+ cmd = (
+ 'systeminfo | findstr /B /C:"OS Name" /B /C:"OS Version" /B '
+ '/C:"Processor" && systeminfo | findstr /E /C:"Mhz"'
+ )
+ return subprocess.run(cmd, capture_output=True, text=True, shell=True).stdout
+
+
+def _getSystemInfoMac():
+ """Get system information, assuming the system is MacOS.
+
+ Returns
+ -------
+ str
+ Basic system information: OS name, OS version, basic processor information
+
+ Examples
+ --------
+ Example results:
+
+ System Software Overview:
+
+ System Version: macOS 12.1 (21C52)
+ Kernel Version: Darwin 21.2.0
+ ...
+ Hardware Overview:
+ Model Name: MacBook Pro
+ ...
+ """
+ cmd = "system_profiler SPSoftwareDataType SPHardwareDataType"
+ return subprocess.check_output(cmd, shell=True).decode("utf-8")
+
+
+def _getSystemInfoLinux():
+ """Get system information, assuming the system is Linux.
+
+ This method uses multiple, redundant variations on common Linux command utilities to get the
+ necessary information. While it is not possible to guarantee what programs or files will be
+ available on all Linux operating systems, this collection of tools is widely supported and
+ should provide reasonably broad coverage across distributions.
+
+ Returns
+ -------
+ str
+ Basic system information: OS name, OS version, basic processor information
+
+ Examples
+ --------
+ Example results:
+
+ OS Info: Ubuntu 22.04.3 LTS
+ Processor(s):
+ processor : 0
+ vendor_id : GenuineIntel
+ cpu family : 6
+ model : 126
+ model name : Intel(R) Core(TM) i5-1035G1 CPU @ 1.00GHz
+ ...
+ """
+ # get OS name / version
+ linuxOsCommands = [
+ 'cat /etc/os-release | grep "^PRETTY_NAME=" | cut -d = -f 2',
+ "uname -a",
+ "lsb_release -d | cut -d : -f 2",
+ 'hostnamectl | grep "Operating System" | cut -d : -f 2',
+ ]
+ osInfo = ""
+ for cmd in linuxOsCommands:
+ osInfo = subprocess.run(
+ cmd, capture_output=True, text=True, shell=True
+ ).stdout.strip()
+ if osInfo:
+ break
+
+ if not osInfo:
+ runLog.warning("Linux OS information not found.")
+ return ""
+
+ # get processor information
+ linuxProcCommands = ["cat /proc/cpuinfo", "lscpu", "lshw -class CPU"]
+ procInfo = ""
+ for cmd in linuxProcCommands:
+ procInfo = subprocess.run(
+ cmd, capture_output=True, text=True, shell=True
+ ).stdout
+ if procInfo:
+ break
+
+ if not procInfo:
+ runLog.warning("Linux processor information not found.")
+ return ""
+
+ # build output string
+ out = "OS Info: "
+ out += osInfo.strip()
+ out += "\nProcessor(s):\n "
+ out += procInfo.strip().replace("\n", "\n ")
+ out += "\n"
+
+ return out
+
+
+def getSystemInfo():
+ """Get system information, assuming the system is Windows or Linux.
+
+ Notes
+ -----
+ The format of the system information will differ from one operating system to another.
+
+ Returns
+ -------
+ str
+ Basic system information: OS name, OS version, basic processor information
+ """
+ # Get basic system information (on Windows, Linux, or MacOS)
+ if "win" in sys.platform:
+ return _getSystemInfoWindows()
+ elif "linux" in sys.platform:
+ return _getSystemInfoLinux()
+ elif "darwin" in sys.platform:
+ return _getSystemInfoMac()
+ else:
+ runLog.warning(
+ f"Cannot get system information for {sys.platform} because ARMI only "
+ + "supports Linux, Windows, and MacOS."
+ )
+ return ""
+
+
def getInterfaceStackSummary(o):
data = []
for ii, i in enumerate(o.interfaces, start=1):
@@ -266,7 +429,7 @@ def getInterfaceStackSummary(o):
"EOL order",
"BOL forced",
),
- tablefmt="armi",
+ tableFmt="armi",
)
text = text
return text
@@ -276,7 +439,7 @@ def writeTightCouplingConvergenceSummary(convergenceSummary):
runLog.info("Tight Coupling Convergence Summary")
runLog.info(
tabulate.tabulate(
- convergenceSummary, headers="keys", showindex=True, tablefmt="armi"
+ convergenceSummary, headers="keys", showIndex=True, tableFmt="armi"
)
)
@@ -390,8 +553,7 @@ def _makeBOLAssemblyMassSummary(massSum):
line += "{0:<25.3f}".format(s[val])
str_.append("{0:12s}{1}".format(val, line))
- # print blocks in this assembly
- # up to 10
+ # print blocks in this assembly up to 10
for i in range(10):
line = " " * 12
for s in massSum:
@@ -401,6 +563,7 @@ def _makeBOLAssemblyMassSummary(massSum):
line += " " * 25
if re.search(r"\S", line): # \S matches any non-whitespace character.
str_.append(line)
+
return "\n".join(str_)
@@ -425,10 +588,10 @@ def writeCycleSummary(core):
Parameters
----------
- core: armi.reactor.reactors.Core
+ core: armi.reactor.reactors.Core
cs: armi.settings.caseSettings.Settings
"""
- # would io be worth considering for this?
+ # Would io be worth considering for this?
cycle = core.r.p.cycle
str_ = []
runLog.important("Cycle {0} Summary:".format(cycle))
@@ -446,7 +609,6 @@ def setNeutronBalancesReport(core):
Parameters
----------
core : armi.reactor.reactors.Core
-
"""
if not core.getFirstBlock().p.rateCap:
runLog.warning(
@@ -539,7 +701,7 @@ def summarizePinDesign(core):
designInfo["zrFrac"].append(fuel.getMassFrac("ZR"))
# assumption made that all lists contain only numerical data
- designInfo = {key: numpy.average(data) for key, data in designInfo.items()}
+ designInfo = {key: np.average(data) for key, data in designInfo.items()}
dimensionless = {"sd", "hot sd", "zrFrac", "nPins"}
for key, average_value in designInfo.items():
@@ -642,7 +804,7 @@ def makeCoreDesignReport(core, cs):
Parameters
----------
- core: armi.reactor.reactors.Core
+ core: armi.reactor.reactors.Core
cs: armi.settings.caseSettings.Settings
"""
coreDesignTable = report.data.Table(
@@ -957,7 +1119,6 @@ def makeCoreAndAssemblyMaps(r, cs, generateFullCoreMap=False, showBlockAxMesh=Tr
titleSize=10,
fontSize=8,
)
- plotting.close()
report.setData(
"Radial Core Map", os.path.abspath(fName), report.FACE_MAP, report.DESIGN
diff --git a/armi/bookkeeping/report/tests/test_newReport.py b/armi/bookkeeping/report/tests/test_newReport.py
index 80b1ecb10..bf11ecf2c 100644
--- a/armi/bookkeeping/report/tests/test_newReport.py
+++ b/armi/bookkeeping/report/tests/test_newReport.py
@@ -31,7 +31,9 @@
class TestReportContentCreation(unittest.TestCase):
def setUp(self):
- self.o, self.r = test_reactors.loadTestReactor(TEST_ROOT)
+ self.o, self.r = test_reactors.loadTestReactor(
+ TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
self.td = TemporaryDirectoryChanger()
self.td.__enter__()
diff --git a/armi/bookkeeping/report/tests/test_report.py b/armi/bookkeeping/report/tests/test_report.py
index 648d1bde3..6dfb12e26 100644
--- a/armi/bookkeeping/report/tests/test_report.py
+++ b/armi/bookkeeping/report/tests/test_report.py
@@ -13,15 +13,25 @@
# limitations under the License.
"""Really basic tests of the report Utils."""
+from glob import glob
+from unittest.mock import patch
import logging
import os
+import subprocess
+import sys
import unittest
from armi import runLog, settings
from armi.bookkeeping import report
from armi.bookkeeping.report import data, reportInterface
from armi.bookkeeping.report.reportingUtils import (
+ _getSystemInfoLinux,
+ _getSystemInfoMac,
+ _getSystemInfoWindows,
+ getNodeName,
+ getSystemInfo,
makeBlockDesignReport,
+ makeCoreDesignReport,
setNeutronBalancesReport,
summarizePinDesign,
summarizePower,
@@ -35,6 +45,101 @@
from armi.utils.directoryChangers import TemporaryDirectoryChanger
+class _MockReturnResult:
+ """Mocking the subprocess.run() return object."""
+
+ def __init__(self, stdout):
+ self.stdout = stdout
+
+
+class TestReportingUtils(unittest.TestCase):
+ def test_getSystemInfoLinux(self):
+ """Test _getSystemInfoLinux() on any operating system, by mocking the system calls."""
+ osInfo = '"Ubuntu 22.04.3 LTS"'
+ procInfo = """processor : 0
+vendor_id : GenuineIntel
+cpu family : 6
+model : 126
+model name : Intel(R) Core(TM) i5-1035G1 CPU @ 1.00GHz
+...
+"""
+ correctResult = """OS Info: "Ubuntu 22.04.3 LTS"
+Processor(s):
+ processor : 0
+ vendor_id : GenuineIntel
+ cpu family : 6
+ model : 126
+ model name : Intel(R) Core(TM) i5-1035G1 CPU @ 1.00GHz
+ ..."""
+
+ def __mockSubprocessRun(*args, **kwargs):
+ if "os-release" in args[0]:
+ return _MockReturnResult(osInfo)
+ else:
+ return _MockReturnResult(procInfo)
+
+ with patch.object(subprocess, "run", side_effect=__mockSubprocessRun):
+ out = _getSystemInfoLinux()
+ self.assertEqual(out.strip(), correctResult)
+
+ @patch("subprocess.run")
+ def test_getSystemInfoWindows(self, mockSubprocess):
+ """Test _getSystemInfoWindows() on any operating system, by mocking the system call."""
+ windowsResult = """OS Name: Microsoft Windows 10 Enterprise
+OS Version: 10.0.19041 N/A Build 19041
+Processor(s): 1 Processor(s) Installed.
+ [01]: Intel64 Family 6 Model 142 Stepping 12 GenuineIntel ~801 Mhz"""
+
+ mockSubprocess.return_value = _MockReturnResult(windowsResult)
+
+ out = _getSystemInfoWindows()
+ self.assertEqual(out, windowsResult)
+
+ @patch("subprocess.run")
+ def test_getSystemInfoMac(self, mockSubprocess):
+ """Test _getSystemInfoMac() on any operating system, by mocking the system call."""
+ macResult = b"""System Software Overview:
+
+ System Version: macOS 12.1 (21C52)
+ Kernel Version: Darwin 21.2.0
+ ...
+ Hardware Overview:
+ Model Name: MacBook Pro
+ ..."""
+
+ mockSubprocess.return_value = _MockReturnResult(macResult)
+
+ out = _getSystemInfoMac()
+ self.assertEqual(out, macResult.decode("utf-8"))
+
+ def test_getSystemInfo(self):
+ """Basic sanity check of getSystemInfo() running in the wild.
+
+ This test should pass if it is run on Windows or mainstream Linux distros. But we expect
+ this to fail if the test is run on some other OS.
+ """
+ if "darwin" in sys.platform:
+ # too complicated to test MacOS in this method
+ return
+
+ out = getSystemInfo()
+ substrings = ["OS ", "Processor(s):"]
+
+ for sstr in substrings:
+ self.assertIn(sstr, out)
+
+ self.assertGreater(len(out), sum(len(sstr) + 5 for sstr in substrings))
+
+ def test_getNodeName(self):
+ """Test that the getNodeName() method returns a non-empty string.
+
+ It is hard to know what string SHOULD be returned here, as it depends on how the OS is
+ set up on your machine or cluster. But this simple test needs to pass as-is on Windows
+ and Linux.
+ """
+ self.assertGreater(len(getNodeName()), 0)
+
+
class TestReport(unittest.TestCase):
def setUp(self):
self.test_group = data.Table(settings.Settings(), "banana")
@@ -77,7 +182,11 @@ def test_getData(self):
def test_reactorSpecificReporting(self):
"""Test a number of reporting utils that require reactor/core information."""
- o, r = loadTestReactor()
+ o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
+
+ # make sure makeCoreDesignReport() doesn't fail, though it won't generate an output here
+ makeCoreDesignReport(r.core, o.cs)
+ self.assertEqual(len(glob("*.html")), 0)
with mockRunLogs.BufferLog() as mock:
# we should start with a clean slate
@@ -88,8 +197,6 @@ def test_reactorSpecificReporting(self):
writeAssemblyMassSummary(r)
self.assertIn("BOL Assembly Mass Summary", mock.getStdout())
self.assertIn("igniter fuel", mock.getStdout())
- self.assertIn("primary control", mock.getStdout())
- self.assertIn("plenum", mock.getStdout())
mock.emptyStdout()
setNeutronBalancesReport(r.core)
@@ -114,9 +221,7 @@ def test_reactorSpecificReporting(self):
mock.emptyStdout()
summarizePower(r.core)
- self.assertIn("Power in radial shield", mock.getStdout())
- self.assertIn("Power in primary control", mock.getStdout())
- self.assertIn("Power in feed fuel", mock.getStdout())
+ self.assertIn("Power in igniter fuel", mock.getStdout())
mock.emptyStdout()
writeCycleSummary(r.core)
@@ -135,7 +240,7 @@ def test_reactorSpecificReporting(self):
self.assertEqual(len(mock.getStdout()), 0)
def test_writeWelcomeHeaders(self):
- o, r = loadTestReactor()
+ o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
# grab this file path
randoFile = os.path.abspath(__file__)
@@ -187,7 +292,7 @@ def test_distributableReportInt(self):
self.assertEqual(repInt.distributable(), 4)
def test_interactBOLReportInt(self):
- o, r = loadTestReactor()
+ o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
repInt = reportInterface.ReportInterface(r, o.cs)
with mockRunLogs.BufferLog() as mock:
@@ -195,10 +300,9 @@ def test_interactBOLReportInt(self):
self.assertIn("Writing assem layout", mock.getStdout())
self.assertIn("BOL Assembly", mock.getStdout())
self.assertIn("wetMass", mock.getStdout())
- self.assertIn("moveable plenum", mock.getStdout())
def test_interactEveryNode(self):
- o, r = loadTestReactor()
+ o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
repInt = reportInterface.ReportInterface(r, o.cs)
with mockRunLogs.BufferLog() as mock:
@@ -208,15 +312,15 @@ def test_interactEveryNode(self):
self.assertIn("keff=", mock.getStdout())
def test_interactBOC(self):
- o, r = loadTestReactor()
+ o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
repInt = reportInterface.ReportInterface(r, o.cs)
self.assertEqual(repInt.fuelCycleSummary["bocFissile"], 0.0)
repInt.interactBOC(1)
- self.assertAlmostEqual(repInt.fuelCycleSummary["bocFissile"], 726.30401755)
+ self.assertAlmostEqual(repInt.fuelCycleSummary["bocFissile"], 4.290603409612653)
def test_interactEOC(self):
- o, r = loadTestReactor()
+ o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
repInt = reportInterface.ReportInterface(r, o.cs)
with mockRunLogs.BufferLog() as mock:
@@ -225,7 +329,7 @@ def test_interactEOC(self):
self.assertIn("TIMER REPORTS", mock.getStdout())
def test_interactEOL(self):
- o, r = loadTestReactor()
+ o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
repInt = reportInterface.ReportInterface(r, o.cs)
with mockRunLogs.BufferLog() as mock:
diff --git a/armi/bookkeeping/tests/__init__.py b/armi/bookkeeping/tests/__init__.py
index a17aebc56..7a5fd7a0a 100644
--- a/armi/bookkeeping/tests/__init__.py
+++ b/armi/bookkeeping/tests/__init__.py
@@ -20,4 +20,4 @@
prevent having to import the world just to get something like a list of strings.
"""
-from ._constants import * # noqa: unused-import
+from armi.bookkeeping.tests._constants import * # noqa: F403
diff --git a/armi/bookkeeping/tests/test_historyTracker.py b/armi/bookkeeping/tests/test_historyTracker.py
index a75beb943..7195aea15 100644
--- a/armi/bookkeeping/tests/test_historyTracker.py
+++ b/armi/bookkeeping/tests/test_historyTracker.py
@@ -101,11 +101,11 @@ def tearDown(self):
self.td.__exit__(None, None, None)
def test_calcMGFluence(self):
- """
+ r"""
This test confirms that mg flux has many groups when loaded with the history tracker.
armi.bookeeping.db.hdf.hdfDB.readBlocksHistory requires
- historical_values[historical_indices] to be cast as a list to read more than the
+ historical_values\[historical_indices\] to be cast as a list to read more than the
first energy group. This test shows that this behavior is preserved.
.. test:: Demonstrate that a parameter stored at differing time nodes can be recovered.
@@ -202,9 +202,10 @@ def test_historyReport(self):
"""
Test generation of history report.
- This does a swap for 5 timesteps:
- | TS 0 1 2 3 4
- |LOC (1,1) (2,1) (3,1) (4,1) SFP
+ This does a swap for 5 timesteps::
+
+ | TS 0 1 2 3 4
+ |LOC (1,1) (2,1) (3,1) (4,1) SFP
"""
history = self.o.getInterface("history")
history.interactBOL()
diff --git a/armi/bookkeeping/tests/test_memoryProfiler.py b/armi/bookkeeping/tests/test_memoryProfiler.py
index 51bd95dc6..b750ae1cd 100644
--- a/armi/bookkeeping/tests/test_memoryProfiler.py
+++ b/armi/bookkeeping/tests/test_memoryProfiler.py
@@ -24,7 +24,11 @@
class TestMemoryProfiler(unittest.TestCase):
def setUp(self):
- self.o, self.r = test_reactors.loadTestReactor(TEST_ROOT, {"debugMem": True})
+ self.o, self.r = test_reactors.loadTestReactor(
+ TEST_ROOT,
+ {"debugMem": True},
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml",
+ )
self.memPro = self.o.getInterface("memoryProfiler")
def tearDown(self):
diff --git a/armi/bookkeeping/visualization/__init__.py b/armi/bookkeeping/visualization/__init__.py
index 23cd7c36d..afeaa5451 100644
--- a/armi/bookkeeping/visualization/__init__.py
+++ b/armi/bookkeeping/visualization/__init__.py
@@ -22,7 +22,5 @@
produce them. Other formats (e.g., SILO) tend to require more system-dependent binary
dependencies, so optional support for them may be added later.
"""
-from armi import plugins # noqa: unused-import
-from armi.bookkeeping.visualization.entryPoint import (
- VisFileEntryPoint, # noqa: unused-import
-)
+from armi import plugins # noqa: F401
+from armi.bookkeeping.visualization.entryPoint import VisFileEntryPoint # noqa: F401
diff --git a/armi/bookkeeping/visualization/tests/test_vis.py b/armi/bookkeeping/visualization/tests/test_vis.py
index 3b6f6bddf..dd3b863e6 100644
--- a/armi/bookkeeping/visualization/tests/test_vis.py
+++ b/armi/bookkeeping/visualization/tests/test_vis.py
@@ -15,7 +15,7 @@
"""Test report visualization."""
import unittest
-import numpy
+import numpy as np
from pyevtk.vtk import VtkTetra
from armi import settings
@@ -41,12 +41,12 @@ def test_testVtkMesh(self):
self.assertEqual(mesh.offsets.size, 0)
self.assertEqual(mesh.cellTypes.size, 0)
- verts = numpy.array(
+ verts = np.array(
[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.25, 0.25, 0.5]]
)
- conn = numpy.array([0, 1, 2, 3])
- offsets = numpy.array([4])
- cellTypes = numpy.array([VtkTetra.tid])
+ conn = np.array([0, 1, 2, 3])
+ offsets = np.array([4])
+ cellTypes = np.array([VtkTetra.tid])
newMesh = utils.VtkMesh(verts, conn, offsets, cellTypes)
mesh.append(newMesh)
@@ -67,7 +67,9 @@ class TestVisDump(unittest.TestCase):
@classmethod
def setUpClass(cls):
caseSetting = settings.Settings()
- _, cls.r = test_reactors.loadTestReactor()
+ _, cls.r = test_reactors.loadTestReactor(
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
cls.hexBlock = cls.r.core.getBlocks()[0]
diff --git a/armi/bookkeeping/visualization/utils.py b/armi/bookkeeping/visualization/utils.py
index a27a0a3e0..c692afeeb 100644
--- a/armi/bookkeeping/visualization/utils.py
+++ b/armi/bookkeeping/visualization/utils.py
@@ -22,7 +22,7 @@
import math
-import numpy
+import numpy as np
from pyevtk.hl import unstructuredGridToVTK
from pyevtk.vtk import VtkHexahedron, VtkQuadraticHexahedron
@@ -54,13 +54,13 @@ def __init__(self, vertices, connectivity, offsets, cellTypes):
"""
Parameters
----------
- vertices : numpy array
+ vertices : np.ndarray
An Nx3 numpy array with one row per (x,y,z) vertex
- connectivity : numpy array
+ connectivity : np.ndarray
A 1-D array containing the vertex indices belonging to each cell
- offsets : numpy array
+ offsets : np.ndarray
A 1-D array containing the index of the first vertex for the next cell
- cellTypes : numpy array
+ cellTypes : np.ndarray
A 1-D array containing the cell type ID for each cell
"""
self.vertices = vertices
@@ -71,35 +71,35 @@ def __init__(self, vertices, connectivity, offsets, cellTypes):
@staticmethod
def empty():
return VtkMesh(
- numpy.empty((0, 3), dtype=numpy.float64),
- numpy.array([], dtype=numpy.int32),
- numpy.array([], dtype=numpy.int32),
- numpy.array([], dtype=numpy.int32),
+ np.empty((0, 3), dtype=np.float64),
+ np.array([], dtype=np.int32),
+ np.array([], dtype=np.int32),
+ np.array([], dtype=np.int32),
)
@property
def x(self):
- return numpy.array(self.vertices[:, 0])
+ return np.array(self.vertices[:, 0])
@property
def y(self):
- return numpy.array(self.vertices[:, 1])
+ return np.array(self.vertices[:, 1])
@property
def z(self):
- return numpy.array(self.vertices[:, 2])
+ return np.array(self.vertices[:, 2])
def append(self, other):
"""Add more cells to the mesh."""
connectOffset = self.vertices.shape[0]
offsetOffset = self.offsets[-1] if self.offsets.size > 0 else 0
- self.vertices = numpy.vstack((self.vertices, other.vertices))
- self.connectivity = numpy.append(
+ self.vertices = np.vstack((self.vertices, other.vertices))
+ self.connectivity = np.append(
self.connectivity, other.connectivity + connectOffset
)
- self.offsets = numpy.append(self.offsets, other.offsets + offsetOffset)
- self.cellTypes = numpy.append(self.cellTypes, other.cellTypes)
+ self.offsets = np.append(self.offsets, other.offsets + offsetOffset)
+ self.cellTypes = np.append(self.cellTypes, other.cellTypes)
def write(self, path, data) -> str:
"""
@@ -193,25 +193,23 @@ def _createHexBlockMesh(b: blocks.HexBlock) -> VtkMesh:
zMax = b.p.ztop
gridOffset = b.spatialLocator.getGlobalCoordinates()[:2]
- gridOffset = numpy.tile(gridOffset, (6, 1))
+ gridOffset = np.tile(gridOffset, (6, 1))
pitch = b.getPitch()
- hexVerts2d = numpy.array(hexagon.corners(rotation=0)) * pitch
+ hexVerts2d = np.array(hexagon.corners(rotation=0)) * pitch
hexVerts2d += gridOffset
# we need a top and bottom hex
- hexVerts2d = numpy.vstack((hexVerts2d, hexVerts2d))
+ hexVerts2d = np.vstack((hexVerts2d, hexVerts2d))
# fold in z locations to get 3d coordinates
- hexVerts = numpy.hstack(
- (hexVerts2d, numpy.array([[zMin] * 6 + [zMax] * 6]).transpose())
- )
+ hexVerts = np.hstack((hexVerts2d, np.array([[zMin] * 6 + [zMax] * 6]).transpose()))
return VtkMesh(
hexVerts,
- numpy.array(list(range(12))),
- numpy.array([12]),
- numpy.array([_HEX_PRISM_TID]),
+ np.array(list(range(12))),
+ np.array([12]),
+ np.array([_HEX_PRISM_TID]),
)
@@ -222,13 +220,13 @@ def _createCartesianBlockMesh(b: blocks.CartesianBlock) -> VtkMesh:
zMax = b.p.ztop
gridOffset = b.spatialLocator.getGlobalCoordinates()[:2]
- gridOffset = numpy.tile(gridOffset, (4, 1))
+ gridOffset = np.tile(gridOffset, (4, 1))
pitch = b.getPitch()
halfPitchX = pitch[0] * 0.5
halfPitchY = pitch[0] * 0.5
- rectVerts = numpy.array(
+ rectVerts = np.array(
[
[halfPitchX, halfPitchY],
[-halfPitchX, halfPitchY],
@@ -239,18 +237,16 @@ def _createCartesianBlockMesh(b: blocks.CartesianBlock) -> VtkMesh:
rectVerts += gridOffset
# make top/bottom rectangles
- boxVerts = numpy.vstack((rectVerts, rectVerts))
+ boxVerts = np.vstack((rectVerts, rectVerts))
# fold in z coordinates
- boxVerts = numpy.hstack(
- (boxVerts, numpy.array([[zMin] * 4 + [zMax] * 4]).transpose())
- )
+ boxVerts = np.hstack((boxVerts, np.array([[zMin] * 4 + [zMax] * 4]).transpose()))
return VtkMesh(
boxVerts,
- numpy.array(list(range(8))),
- numpy.array([8]),
- numpy.array([VtkHexahedron.tid]),
+ np.array(list(range(8))),
+ np.array([8]),
+ np.array([VtkHexahedron.tid]),
)
@@ -285,13 +281,13 @@ def _createTRZBlockMesh(b: blocks.ThRZBlock) -> VtkMesh:
(rOut, thIn, (zIn + zOut) * 0.5),
(rOut, thOut, (zIn + zOut) * 0.5),
]
- vertsXYZ = numpy.array(
+ vertsXYZ = np.array(
[[r * math.cos(th), r * math.sin(th), z] for r, th, z in vertsRTZ]
)
return VtkMesh(
vertsXYZ,
- numpy.array(list(range(20))),
- numpy.array([20]),
- numpy.array([VtkQuadraticHexahedron.tid]),
+ np.array(list(range(20))),
+ np.array([20]),
+ np.array([VtkQuadraticHexahedron.tid]),
)
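
A numeric sketch of the index bookkeeping in ``VtkMesh.append`` above: a second tetrahedron's
connectivity is shifted by the number of existing vertices, and its offsets by the previous
final offset:

import numpy as np

conn = np.array([0, 1, 2, 3])  # first cell's vertex indices
offsets = np.array([4])  # one cell, ending after 4 connectivity entries
otherConn = np.array([0, 1, 2, 3])  # second mesh, locally indexed
conn = np.append(conn, otherConn + 4)  # -> [0 1 2 3 4 5 6 7]
offsets = np.append(offsets, np.array([4]) + offsets[-1])  # -> [4 8]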
diff --git a/armi/bookkeeping/visualization/vtk.py b/armi/bookkeeping/visualization/vtk.py
index ab59c3201..8b34b3590 100644
--- a/armi/bookkeeping/visualization/vtk.py
+++ b/armi/bookkeeping/visualization/vtk.py
@@ -32,7 +32,7 @@
from typing import Dict, Any, List, Optional, Set, Tuple
-import numpy
+import numpy as np
from pyevtk.vtk import VtkGroup
from armi import runLog
@@ -112,7 +112,7 @@ def dumpState(
blockNdens = database3.collectBlockNumberDensities(blks)
# we need to copy the number density vectors to guarantee unit stride, which
# pyevtk requires. Kinda seems like something it could do for us, but oh well.
- blockNdens = {key: numpy.array(value) for key, value in blockNdens.items()}
+ blockNdens = {key: np.array(value) for key, value in blockNdens.items()}
blockData.update(blockNdens)
fullPath = blockMesh.write(blockPath, blockData)
@@ -161,7 +161,7 @@ def _collectObjectData(
val = obj.p[pDef.name]
data.append(val)
- data = numpy.array(data)
+ data = np.array(data)
if data.dtype.kind == "S" or data.dtype.kind == "U":
# no string support!
@@ -169,7 +169,7 @@ def _collectObjectData(
if data.dtype.kind == "O":
# datatype is "object", usually because it's jagged, or has Nones. We are
# willing to try handling the Nones, but jagged also isnt visualizable.
- nones = numpy.where([d is None for d in data])[0]
+ nones = np.where([d is None for d in data])[0]
if len(nones) == data.shape[0]:
# all Nones, so give up
diff --git a/armi/bookkeeping/visualization/xdmf.py b/armi/bookkeeping/visualization/xdmf.py
index 256e712f4..a7d2f36ce 100644
--- a/armi/bookkeeping/visualization/xdmf.py
+++ b/armi/bookkeeping/visualization/xdmf.py
@@ -33,8 +33,7 @@
wedges. To do that would require splitting the parameter data, which would defeat
the main benefit of using XMDF in the first place (to be able to plot out of the
original Database file). Cartesian and R-X-Theta geometries in VisIt seem to work
- fine. Support for polyhedra is being tracked in `#1287
- `_.
+ fine.
"""
import io
@@ -44,7 +43,7 @@
import xml.etree.ElementTree as ET
import xml.dom.minidom
-import numpy
+import numpy as np
import h5py
from armi import runLog
@@ -67,7 +66,7 @@
# proper XDMF, these need to be offset to the proper vertex indices in the full mesh,
# and have the number of face vertices inserted into the proper locations (notice the
# [0] placeholders).
-_HEX_PRISM_TOPO = numpy.array(
+_HEX_PRISM_TOPO = np.array(
[0]
+ list(range(6))
+ [0]
@@ -87,25 +86,25 @@
)
# The indices of the placeholder zeros from _HEX_PRISM_TOPO array above
-_HEX_PRISM_FACE_SIZE_IDX = numpy.array([0, 7, 14, 19, 24, 29, 34, 39])
+_HEX_PRISM_FACE_SIZE_IDX = np.array([0, 7, 14, 19, 24, 29, 34, 39])
# The number of vertices for each face
-_HEX_PRISM_FACE_SIZES = numpy.array([6, 6, 4, 4, 4, 4, 4, 4])
+_HEX_PRISM_FACE_SIZES = np.array([6, 6, 4, 4, 4, 4, 4, 4])
def _getAttributesFromDataset(d: h5py.Dataset) -> Dict[str, str]:
dataType = {
- numpy.dtype("int32"): "Int",
- numpy.dtype("int64"): "Int",
- numpy.dtype("float32"): "Float",
- numpy.dtype("float64"): "Float",
+ np.dtype("int32"): "Int",
+ np.dtype("int64"): "Int",
+ np.dtype("float32"): "Float",
+ np.dtype("float64"): "Float",
}[d.dtype]
precision = {
- numpy.dtype("int32"): "4",
- numpy.dtype("int64"): "8",
- numpy.dtype("float32"): "4",
- numpy.dtype("float64"): "8",
+ np.dtype("int32"): "4",
+ np.dtype("int64"): "8",
+ np.dtype("float32"): "4",
+ np.dtype("float64"): "8",
}[d.dtype]
return {
@@ -373,11 +372,11 @@ def _makeBlockMesh(self, r: reactors.Reactor, indexMap) -> ET.Element:
verticesInH5 = groupName + "/blk_vertices"
self._meshH5[verticesInH5] = verts
- topoValues = numpy.array([], dtype=numpy.int32)
+ topoValues = np.array([], dtype=np.int32)
offset = 0
for b in blks:
nVerts, cellTopo = _getTopologyFromShape(b, offset)
- topoValues = numpy.append(topoValues, cellTopo)
+ topoValues = np.append(topoValues, cellTopo)
offset += nVerts
topoInH5 = groupName + "/blk_topology"
@@ -407,11 +406,11 @@ def _makeAssemblyMesh(self, r: reactors.Reactor, indexMap) -> ET.Element:
verticesInH5 = groupName + "/asy_vertices"
self._meshH5[verticesInH5] = verts
- topoValues = numpy.array([], dtype=numpy.int32)
+ topoValues = np.array([], dtype=np.int32)
offset = 0
for a in asys:
nVerts, cellTopo = _getTopologyFromShape(a[0], offset)
- topoValues = numpy.append(topoValues, cellTopo)
+ topoValues = np.append(topoValues, cellTopo)
offset += nVerts
topoInH5 = groupName + "/asy_topology"
@@ -473,7 +472,7 @@ def _getTopologyFromShape(b: blocks.Block, offset: int) -> Tuple[int, List[int]]
prefix = [_POLYHEDRON, 8]
topo = _HEX_PRISM_TOPO + offset
topo[_HEX_PRISM_FACE_SIZE_IDX] = _HEX_PRISM_FACE_SIZES
- topo = numpy.append(prefix, topo)
+ topo = np.append(prefix, topo)
return 12, topo
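
A toy analog of the polyhedron topology assembly in ``_getTopologyFromShape`` above (much
smaller arrays than the real hex-prism ones): the placeholder zeros at the face-start indices
are shifted along with everything else, then overwritten with the per-face vertex counts:

import numpy as np

faceSizeIdx = np.array([0, 3])  # toy analog of _HEX_PRISM_FACE_SIZE_IDX
topo = np.array([0, 10, 11, 0, 11, 12])  # [0] placeholders at each face start
topo = topo + 100  # shift local vertex ids into the global mesh
topo[faceSizeIdx] = np.array([2, 2])  # insert per-face vertex counts
# topo is now [2, 110, 111, 2, 111, 112]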
diff --git a/armi/cases/__init__.py b/armi/cases/__init__.py
index 7b6a9b48a..446c2cfa8 100644
--- a/armi/cases/__init__.py
+++ b/armi/cases/__init__.py
@@ -63,5 +63,5 @@
Then submit the inputs to your HPC cluster.
"""
-from armi.cases.case import Case # noqa: unused-import
-from armi.cases.suite import CaseSuite # noqa: unused-import
+from armi.cases.case import Case # noqa: F401
+from armi.cases.suite import CaseSuite # noqa: F401
diff --git a/armi/cases/case.py b/armi/cases/case.py
index dec8e69b4..c0a3e6c29 100644
--- a/armi/cases/case.py
+++ b/armi/cases/case.py
@@ -30,10 +30,8 @@
import glob
import os
import pathlib
-import platform
import pstats
import re
-import shutil
import sys
import textwrap
import time
@@ -41,7 +39,6 @@
import coverage
import six
-import tabulate
from armi import context
from armi import getPluginManager
@@ -57,13 +54,14 @@
from armi.reactor import reactors
from armi.reactor import systemLayoutInput
from armi.utils import pathTools
+from armi.utils import tabulate
from armi.utils import textProcessors
from armi.utils.customExceptions import NonexistentSetting
from armi.utils.directoryChangers import DirectoryChanger
from armi.utils.directoryChangers import ForcedCreationDirectoryChanger
-# change from default .coverage to help with Windows dotfile issues.
-# Must correspond with data_file entry in `coveragerc`!
+# Change from default .coverage to help with Windows dotfile issues.
+# Must correspond with data_file entry in `pyproject.toml`!
COVERAGE_RESULTS_FILE = "coverage_results.cov"
@@ -263,11 +261,10 @@ def _getPotentialDependencies(self, dirName, title):
def caseMatches(case):
if os.path.normcase(case.title) != os.path.normcase(title):
return False
- if os.path.normcase(os.path.abspath(case.directory)) != os.path.normcase(
- os.path.abspath(dirName)
- ):
- return False
- return True
+
+ return os.path.normcase(
+ os.path.abspath(case.directory)
+ ) == os.path.normcase(os.path.abspath(dirName))
return {case for case in self._caseSuite if caseMatches(case)}
@@ -444,6 +441,10 @@ def _getCoverageRcFile(userCovFile, makeCopy=False):
"""Helper to provide the coverage configuration file according to the OS. A
user-supplied file will take precedence, and is not checked for a dot-filename.
+ Notes
+ -----
+ ARMI's ".coveragerc" file has been replaced by "pyproject.toml".
+
Parameters
----------
userCovFile : str
@@ -455,21 +456,14 @@ def _getCoverageRcFile(userCovFile, makeCopy=False):
Returns
-------
covFile : str
- path of coveragerc file
+ path of the pyproject.toml file
"""
# User-defined file takes precedence.
if userCovFile:
return os.path.abspath(userCovFile)
covRcDir = os.path.abspath(context.PROJECT_ROOT)
- covFile = os.path.join(covRcDir, ".coveragerc")
- if platform.system() == "Windows":
- covFileWin = os.path.join(covRcDir, "coveragerc")
- if makeCopy is True:
- # Make a copy of the file without the dot in the name
- shutil.copy(covFile, covFileWin)
- return covFileWin
- return covFile
+ return os.path.join(covRcDir, "pyproject.toml")
def _startProfiling(self):
"""Helper to the Case.run(): start the Python profiling,
@@ -611,7 +605,7 @@ def checkInputs(self):
tabulate.tabulate(
queryData,
headers=["Number", "Statement", "Question"],
- tablefmt="armi",
+ tableFmt="armi",
)
)
if context.CURRENT_MODE == context.Mode.INTERACTIVE:
@@ -624,32 +618,6 @@ def summarizeDesign(self):
"""Uses the ReportInterface to create a fancy HTML page describing the design inputs."""
_ = reportsEntryPoint.createReportFromSettings(self.cs)
- def buildCommand(self, python="python"):
- """
- Build an execution command for running or submitting a job.
-
- Parameters
- ----------
- python : str, optional
- The path to the python executable to use for executing the case. By default
- this will be whatever "python" resolves to in the target environment.
- However when running in more exotic environments (e.g. HPC cluster), it is
- usually desireable to provide a specific python executable.
- """
- command = ""
- if self.cs["numProcessors"] > 1:
- command += "mpiexec -n {} ".format(self.cs["numProcessors"])
- if self.cs["mpiTasksPerNode"] > 0:
- command += "-c {} ".format(self.cs["mpiTasksPerNode"])
-
- command += "{} -u ".format(python)
- if not __debug__:
- command += " -O "
-
- command += ' -m {} run "{}.yaml"'.format(context.APP_NAME, self.cs.caseTitle)
-
- return command
-
def clone(
self,
additionalFiles=None,
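
A sketch of the internal tabulate wrapper's camelCase keywords, as exercised throughout this
diff (the table contents are made up):

from armi.utils import tabulate

print(
    tabulate.tabulate(
        data=[["keff", 1.002], ["power (MW)", 300.0]],
        headers=["Parameter", "Value"],
        tableFmt="armi",
    )
)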
diff --git a/armi/cases/suite.py b/armi/cases/suite.py
index f3928403e..222e2397c 100644
--- a/armi/cases/suite.py
+++ b/armi/cases/suite.py
@@ -16,13 +16,12 @@
The ``CaseSuite`` object is responsible for running, and executing a set of user inputs. Many
entry points redirect into ``CaseSuite`` methods, such as ``clone``, ``compare``, and ``submit``.
-Used in conjunction with the :py:class:`~armi.cases.case.Case` object, ``CaseSuite`` can
-be used to collect a series of cases
-and submit them to a cluster for execution. Furthermore, a ``CaseSuite`` can be used to gather
-executed cases for post-analysis.
+Used in conjunction with the :py:class:`~armi.cases.case.Case` object, ``CaseSuite`` can be used to
+collect a series of cases and submit them to a cluster for execution. Furthermore, a ``CaseSuite``
+can be used to gather executed cases for post-analysis.
-``CaseSuite``\ s should allow ``Cases`` to be added from totally separate directories.
-This is useful for plugin-informed testing as well as other things.
+``CaseSuite``\ s should allow ``Cases`` to be added from totally separate directories. This is
+useful for plugin-informed testing as well as other things.
See Also
--------
@@ -32,12 +31,11 @@
from typing import Optional, Sequence
import traceback
-import tabulate
-
from armi import runLog
from armi import settings
from armi.cases import case as armicase
from armi.utils import directoryChangers
+from armi.utils import tabulate
class CaseSuite:
@@ -88,7 +86,12 @@ def __len__(self):
return len(self._cases)
def discover(
- self, rootDir=None, patterns=None, ignorePatterns=None, recursive=True
+ self,
+ rootDir=None,
+ patterns=None,
+ ignorePatterns=None,
+ recursive=True,
+ skipInspection=False,
):
"""
Finds case objects by searching for a pattern of file paths, and adds them to
@@ -106,6 +109,8 @@ def discover(
file patterns to exclude matching file names
recursive : bool, optional
if True, recursively search for settings files
+ skipInspection : bool, optional
+ if True, skip running the input checks
"""
csFiles = settings.recursivelyLoadSettingsFiles(
rootDir or os.path.abspath(os.getcwd()),
@@ -117,7 +122,8 @@ def discover(
for cs in csFiles:
case = armicase.Case(cs=cs, caseSuite=self)
- case.checkInputs()
+ if not skipInspection:
+ case.checkInputs()
self.add(case)
def echoConfiguration(self):
@@ -148,7 +154,7 @@ def echoConfiguration(self):
for c in self
],
headers=["Title", "Enabled", "Dependencies"],
- tablefmt="armi",
+ tableFmt="armi",
)
)
@@ -213,7 +219,7 @@ def run(self):
with directoryChangers.DirectoryChanger(case.directory):
try:
case.run()
- except: # noqa: bare-except
+ except Exception:
# allow all errors and continue to next run
runLog.error(f"{case} failed during execution.")
traceback.print_exc()
@@ -303,7 +309,7 @@ def writeTable(tableResults):
tabulate.tabulate(
[["Integration test directory: {}".format(os.getcwd())]],
["SUMMARIZED INTEGRATION TEST DIFFERENCES:"],
- tablefmt=fmt,
+ tableFmt=fmt,
)
)
)
@@ -315,10 +321,10 @@ def writeTable(tableResults):
data.append((testName, userFile, refFile, caseIssues))
totalDiffs += caseIssues
- print(tabulate.tabulate(data, header, tablefmt=fmt))
+ print(tabulate.tabulate(data, header, tableFmt=fmt))
print(
tabulate.tabulate(
- [["Total number of differences: {}".format(totalDiffs)]], tablefmt=fmt
+ [["Total number of differences: {}".format(totalDiffs)]], tableFmt=fmt
)
)
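
A usage sketch for the new ``skipInspection`` flag (the directory and patterns are
hypothetical):

from armi import settings
from armi.cases import CaseSuite

suite = CaseSuite(settings.Settings())
suite.discover(rootDir="mySuiteDir", patterns=["*.yaml"], skipInspection=True)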
diff --git a/armi/cases/tests/test_cases.py b/armi/cases/tests/test_cases.py
index 0483868fa..b04c9fcdd 100644
--- a/armi/cases/tests/test_cases.py
+++ b/armi/cases/tests/test_cases.py
@@ -47,8 +47,7 @@
"""
-# This gets made into a StringIO multiple times because
-# it gets read multiple times.
+# This gets made into a StringIO multiple times because it gets read multiple times.
BLUEPRINT_INPUT = """
nuclide flags:
@@ -139,10 +138,7 @@ def test_getCoverageRcFile(self):
covRcDir = os.path.abspath(context.PROJECT_ROOT)
# Don't actually copy the file, just check the file paths match
covRcFile = case._getCoverageRcFile(userCovFile="", makeCopy=False)
- if platform.system() == "Windows":
- self.assertEqual(covRcFile, os.path.join(covRcDir, "coveragerc"))
- else:
- self.assertEqual(covRcFile, os.path.join(covRcDir, ".coveragerc"))
+ self.assertEqual(covRcFile, os.path.join(covRcDir, "pyproject.toml"))
userFile = "UserCovRc"
covRcFile = case._getCoverageRcFile(userCovFile=userFile, makeCopy=False)
@@ -237,7 +233,10 @@ def test_run(self):
self.assertIn("Triggering BOL Event", mock.getStdout())
self.assertIn("xsGroups", mock.getStdout())
- self.assertIn("Completed EveryNode - cycle 0", mock.getStdout())
+ self.assertIn(
+ "Completed EveryNode - timestep: cycle 0, node 0, year 0.00 Event",
+ mock.getStdout(),
+ )
def test_clone(self):
testTitle = "CLONE_TEST"
@@ -429,10 +428,6 @@ def test_titleSetterGetter(self):
self.c1.title = "new_bob"
self.assertEqual(self.c1.title, "new_bob")
- def test_buildCommand(self):
- cmd = self.c1.buildCommand()
- self.assertEqual(cmd, 'python -u -m armi run "c1.yaml"')
-
class TestCaseSuiteComparison(unittest.TestCase):
"""CaseSuite.compare() tests."""
@@ -448,7 +443,9 @@ def test_compareNoDiffs(self):
"""As a baseline, this test should always reveal zero diffs."""
# build two super-simple H5 files for testing
o, r = test_reactors.loadTestReactor(
- TEST_ROOT, customSettings={"reloadDBName": "reloadingDB.h5"}
+ TEST_ROOT,
+ customSettings={"reloadDBName": "reloadingDB.h5"},
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml",
)
suites = []
diff --git a/armi/cases/tests/test_suiteBuilder.py b/armi/cases/tests/test_suiteBuilder.py
index e1681091f..4f1c5780e 100644
--- a/armi/cases/tests/test_suiteBuilder.py
+++ b/armi/cases/tests/test_suiteBuilder.py
@@ -116,25 +116,25 @@ def test_buildSuite(self):
SettingModifier("settingName2", value) for value in (3, 4, 5)
)
- self.assertEquals(builder.modifierSets[0][0].value, 1)
- self.assertEquals(builder.modifierSets[0][1].value, 3)
+ self.assertEqual(builder.modifierSets[0][0].value, 1)
+ self.assertEqual(builder.modifierSets[0][1].value, 3)
- self.assertEquals(builder.modifierSets[1][0].value, 2)
- self.assertEquals(builder.modifierSets[1][1].value, 3)
+ self.assertEqual(builder.modifierSets[1][0].value, 2)
+ self.assertEqual(builder.modifierSets[1][1].value, 3)
- self.assertEquals(builder.modifierSets[2][0].value, 1)
- self.assertEquals(builder.modifierSets[2][1].value, 4)
+ self.assertEqual(builder.modifierSets[2][0].value, 1)
+ self.assertEqual(builder.modifierSets[2][1].value, 4)
- self.assertEquals(builder.modifierSets[3][0].value, 2)
- self.assertEquals(builder.modifierSets[3][1].value, 4)
+ self.assertEqual(builder.modifierSets[3][0].value, 2)
+ self.assertEqual(builder.modifierSets[3][1].value, 4)
- self.assertEquals(builder.modifierSets[4][0].value, 1)
- self.assertEquals(builder.modifierSets[4][1].value, 5)
+ self.assertEqual(builder.modifierSets[4][0].value, 1)
+ self.assertEqual(builder.modifierSets[4][1].value, 5)
- self.assertEquals(builder.modifierSets[5][0].value, 2)
- self.assertEquals(builder.modifierSets[5][1].value, 5)
+ self.assertEqual(builder.modifierSets[5][0].value, 2)
+ self.assertEqual(builder.modifierSets[5][1].value, 5)
- self.assertEquals(len(builder.modifierSets), 6)
+ self.assertEqual(len(builder.modifierSets), 6)
class TestSeparateEffectsBuilder(unittest.TestCase):
@@ -155,19 +155,19 @@ def test_buildSuite(self):
SettingModifier("settingName2", value) for value in (3, 4, 5)
)
- self.assertEquals(builder.modifierSets[0][0].value, 1)
- self.assertEquals(builder.modifierSets[0][0].settingName, "settingName1")
+ self.assertEqual(builder.modifierSets[0][0].value, 1)
+ self.assertEqual(builder.modifierSets[0][0].settingName, "settingName1")
- self.assertEquals(builder.modifierSets[1][0].value, 2)
- self.assertEquals(builder.modifierSets[1][0].settingName, "settingName1")
+ self.assertEqual(builder.modifierSets[1][0].value, 2)
+ self.assertEqual(builder.modifierSets[1][0].settingName, "settingName1")
- self.assertEquals(builder.modifierSets[2][0].value, 3)
- self.assertEquals(builder.modifierSets[2][0].settingName, "settingName2")
+ self.assertEqual(builder.modifierSets[2][0].value, 3)
+ self.assertEqual(builder.modifierSets[2][0].settingName, "settingName2")
- self.assertEquals(builder.modifierSets[3][0].value, 4)
- self.assertEquals(builder.modifierSets[3][0].settingName, "settingName2")
+ self.assertEqual(builder.modifierSets[3][0].value, 4)
+ self.assertEqual(builder.modifierSets[3][0].settingName, "settingName2")
- self.assertEquals(builder.modifierSets[4][0].value, 5)
- self.assertEquals(builder.modifierSets[4][0].settingName, "settingName2")
+ self.assertEqual(builder.modifierSets[4][0].value, 5)
+ self.assertEqual(builder.modifierSets[4][0].settingName, "settingName2")
- self.assertEquals(len(builder.modifierSets), 5)
+ self.assertEqual(len(builder.modifierSets), 5)
diff --git a/armi/cli/checkInputs.py b/armi/cli/checkInputs.py
index 2685b4c29..cd191779d 100644
--- a/armi/cli/checkInputs.py
+++ b/armi/cli/checkInputs.py
@@ -90,8 +90,8 @@ def addOptions(self):
)
def invoke(self):
- import tabulate
from armi import cases
+ from armi.utils import tabulate
suite = cases.CaseSuite(self.cs)
suite.discover(patterns=self.args.patterns, recursive=self.args.recursive)
@@ -118,7 +118,7 @@ def invoke(self):
tabulate.tabulate(
table,
headers=["case", "can start", "input is self consistent"],
- tablefmt="armi",
+ tableFmt="armi",
)
)
diff --git a/armi/cli/compareCases.py b/armi/cli/compareCases.py
index b42e97f97..cd5acd157 100644
--- a/armi/cli/compareCases.py
+++ b/armi/cli/compareCases.py
@@ -162,6 +162,15 @@ def addOptions(self):
default=[],
help="Pattern to search for inputs to ignore.",
)
+ self.parser.add_argument(
+ "--skip-inspection",
+ "-I",
+ action="store_true",
+ default=False,
+ help="Skip inspection. By default, setting files are checked for integrity and consistency. These "
+ "checks result in needing to manually resolve a number of differences. Using this option will "
+ "suppress the inspection step.",
+ )
def invoke(self):
from armi import cases
@@ -188,6 +197,7 @@ def invoke(self):
rootDir=self.args.reference,
patterns=allTests,
ignorePatterns=self.args.ignore,
+ skipInspection=self.args.skip_inspection,
)
cmpSuite = cases.CaseSuite(self.cs)
@@ -195,6 +205,7 @@ def invoke(self):
rootDir=self.args.comparison,
patterns=self.args.patterns,
ignorePatterns=self.args.ignore,
+ skipInspection=self.args.skip_inspection,
)
nIssues = refSuite.compare(
@@ -206,4 +217,4 @@ def invoke(self):
)
if nIssues > 0:
- sys.exit(nIssues)
+ sys.exit(1)
diff --git a/armi/cli/entryPoint.py b/armi/cli/entryPoint.py
index ef1ffef5b..bef74f07d 100644
--- a/armi/cli/entryPoint.py
+++ b/armi/cli/entryPoint.py
@@ -246,7 +246,7 @@ def createOptionFromSetting(
if additionalAlias is not None:
aliases.append(additionalAlias)
- isListType = settingsInstance.underlyingType == list
+ isListType = settingsInstance.underlyingType is list
try:
self.parser.add_argument(
diff --git a/armi/cli/reportsEntryPoint.py b/armi/cli/reportsEntryPoint.py
index 263081a05..efb2802fc 100644
--- a/armi/cli/reportsEntryPoint.py
+++ b/armi/cli/reportsEntryPoint.py
@@ -19,15 +19,16 @@
from armi.cli import entryPoint
from armi.reactor import blueprints
from armi.reactor import reactors
-from armi.utils import directoryChangers
+from armi.utils.directoryChangers import ForcedCreationDirectoryChanger
class ReportsEntryPoint(entryPoint.EntryPoint):
- """Create report from database files."""
+ """Create a report from a database file."""
name = "report"
settingsArgument = "optional"
description = "Convert ARMI databases into a report"
+ report_out_dir = "reportsOutputFiles"
def __init__(self):
entryPoint.EntryPoint.__init__(self)
@@ -43,8 +44,7 @@ def addOptions(self):
self.parser.add_argument(
"--output-name",
"-o",
- help="Base name for output file(s). File extensions will be added as "
- "appropriate",
+ help="Base name for output file(s). File extensions will be added as appropriate",
type=str,
default=None,
)
@@ -57,15 +57,15 @@ def addOptions(self):
)
self.parser.add_argument(
"--max-node",
- help="An optional (cycle,timeNode) tuple to specify the latest time step "
- "that should be included",
+ help="An optional (cycle,timeNode) tuple to specify the latest time step that should "
+ "be included",
type=str,
default=None,
)
self.parser.add_argument(
"--min-node",
- help="An optional (cycle,timeNode) tuple to specify the earliest time step "
- "that should be included",
+ help="An optional (cycle,timeNode) tuple to specify the earliest time step that should "
+ "be included",
type=str,
default=None,
)
@@ -78,31 +78,22 @@ def addOptions(self):
)
def invoke(self):
- nodes = self.args.nodes
-
if self.args.h5db is None:
- # Just do begining stuff, no database is given...
- if self.cs is not None:
- site = createReportFromSettings(self.cs)
- if self.args.view:
- webbrowser.open(site)
- else:
- raise RuntimeError(
- "No Settings with Blueprint or Database, cannot gerenate a report"
- )
-
+ # Just do BOL stuff, no database is given.
+ site = createReportFromSettings(self.cs)
+ if self.args.view:
+ webbrowser.open(site)
else:
+ self._cleanArgs()
+ nodes = self.args.nodes
+ blueprint = self.args.bp
+
report = reports.ReportContent("Overview")
pm = getPluginManagerOrFail()
db = databaseFactory(self.args.h5db, "r")
- if self.args.bp is not None:
- blueprint = self.args.bp
with db:
- with directoryChangers.ForcedCreationDirectoryChanger(
- "reportsOutputFiles"
- ):
-
+ with ForcedCreationDirectoryChanger(self.report_out_dir):
dbNodes = list(db.genTimeSteps())
cs = db.loadCS()
if self.args.bp is None:
@@ -118,6 +109,7 @@ def invoke(self):
)
stage = reports.ReportStage.Standard
for cycle, node in dbNodes:
+ # check to see if we should skip this time node
if nodes is not None and (cycle, node) not in nodes:
continue
@@ -148,23 +140,68 @@ def invoke(self):
if self.args.view:
webbrowser.open(site)
+ @staticmethod
+ def toTwoTuple(strInput):
+ """Convert a string to a two-tuple of integers.
+
+ Parameters
+ ----------
+ strInput : str
+            A string representing a simple two-tuple of integers, e.g. '(1,3)'.
+
+ Returns
+ -------
+ tuple
+ A tuple of two integers.
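+
+        Examples
+        --------
+        Representative conversions, drawn from this method's unit tests:
+
+        >>> ReportsEntryPoint.toTwoTuple("(1,2)")
+        (1, 2)
+        >>> ReportsEntryPoint.toTwoTuple("(1,2,3)")  # extra values are dropped
+        (1, 2)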
+ """
+ s = strInput.replace("(", "").replace(")", "").split(",")
+        return (int(s[0]), int(s[1]))
+
+ def _cleanArgs(self):
+ """The string arguments passed to this entry point, on the command line, need to be
+ converted to integers.
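+
+        For example, a ``--nodes`` value passed as the string ``"(0,2)(1,3)"`` becomes the list
+        ``[(0, 2), (1, 3)]``.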
+ """
+ if self.args.min_node is not None and type(self.args.min_node) is str:
+ self.args.min_node = ReportsEntryPoint.toTwoTuple(self.args.min_node)
+
+ if self.args.max_node is not None and type(self.args.max_node) is str:
+ self.args.max_node = ReportsEntryPoint.toTwoTuple(self.args.max_node)
+
+ if self.args.nodes is not None and type(self.args.nodes) is str:
+ self.args.nodes = [
+ ReportsEntryPoint.toTwoTuple(n) for n in self.args.nodes.split(")")[:-1]
+ ]
+
def createReportFromSettings(cs):
"""
Create BEGINNING reports, given a settings file.
- This will construct a reactor from the given settings and create BOL reports for
- that reactor/settings.
+ This will construct a reactor from the given settings and create BOL reports for that
+ reactor/settings.
+
+ Parameters
+ ----------
+ cs : Settings
+ A standard ARMI Settings object, to define a run.
+
+ Returns
+ -------
+ str
+ A string representing the HTML for a web page.
"""
+ if cs is None:
+ raise RuntimeError(
+ "No Settings with Blueprint or Database, cannot gerenate a report"
+ )
+
blueprint = blueprints.loadFromCs(cs)
r = reactors.factory(cs, blueprint)
report = reports.ReportContent("Overview")
pm = getPluginManagerOrFail()
report.title = r.name
- with directoryChangers.ForcedCreationDirectoryChanger(
- "{}-reports".format(cs.caseTitle)
- ):
+ with ForcedCreationDirectoryChanger("{}-reports".format(cs.caseTitle)):
_ = pm.hook.getReportContents(
r=r,
cs=cs,
diff --git a/armi/cli/tests/test_runEntryPoint.py b/armi/cli/tests/test_runEntryPoint.py
index 3ec38f25f..b0e541d65 100644
--- a/armi/cli/tests/test_runEntryPoint.py
+++ b/armi/cli/tests/test_runEntryPoint.py
@@ -12,12 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for run cli entry point."""
+from glob import glob
from shutil import copyfile
+import logging
import os
import sys
import unittest
+from armi import runLog
from armi.__main__ import main
+from armi.bookkeeping.db.databaseInterface import DatabaseInterface
from armi.bookkeeping.visualization.entryPoint import VisFileEntryPoint
from armi.cli.checkInputs import CheckInputEntryPoint, ExpandBlueprints
from armi.cli.clone import CloneArmiRunCommandBatch, CloneSuiteCommand
@@ -30,11 +34,56 @@
from armi.cli.run import RunEntryPoint
from armi.cli.runSuite import RunSuiteCommand
from armi.physics.neutronics.diffIsotxs import CompareIsotxsLibraries
+from armi.reactor.tests.test_reactors import loadTestReactor, reduceTestReactorRings
from armi.tests import mockRunLogs, TEST_ROOT, ARMI_RUN_PATH
from armi.utils.directoryChangers import TemporaryDirectoryChanger
from armi.utils.dynamicImporter import getEntireFamilyTree
+def buildTestDB(fileName, numNodes=1, numCycles=1):
+ """This function builds a (super) simple test DB.
+
+ Notes
+ -----
+ This needs to be run inside a temp directory.
+
+ Parameters
+ ----------
+ fileName : str
+ The file name (not path) we want for the ARMI test DB.
+ numNodes : int, optional
+ The number of nodes we want in the DB, default 1.
+ numCycles : int, optional
+ The number of cycles we want in the DB, default 1.
+
+ Returns
+ -------
+ str
+ Database file name.
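+
+    Examples
+    --------
+    A minimal usage sketch (the file name is illustrative)::
+
+        dbName = buildTestDB("myTestDB")  # creates and closes "myTestDB.h5"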
+ """
+ o, r = loadTestReactor(
+ TEST_ROOT,
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml",
+ )
+
+    # create the test DB
+ dbi = DatabaseInterface(r, o.cs)
+ dbi.initDB(fName=f"{fileName}.h5")
+ db = dbi.database
+
+ # populate the db with something
+ r.p.cycle = 0
+ for node in range(abs(numNodes)):
+ for cycle in range(abs(numCycles)):
+ r.p.timeNode = node
+ r.p.cycle = cycle
+ r.p.cycleLength = 100
+ db.writeToDB(r)
+
+ db.close()
+ return f"{fileName}.h5"
+
+
class TestInitializationEntryPoints(unittest.TestCase):
def test_entryPointInitialization(self):
"""Tests the initialization of all subclasses of `EntryPoint`.
@@ -90,6 +139,8 @@ def test_checkInputEntryPointInvoke(self):
ci.parse_args([ARMI_RUN_PATH])
with mockRunLogs.BufferLog() as mock:
+ runLog.LOG.startLog("test_checkInputEntryPointInvoke")
+ runLog.LOG.setVerbosity(logging.INFO)
self.assertEqual("", mock.getStdout())
ci.invoke()
@@ -171,24 +222,31 @@ def test_cloneSuiteCommandBasics(self):
class TestCompareCases(unittest.TestCase):
def test_compareCasesBasics(self):
- cc = CompareCases()
- cc.addOptions()
- cc.parse_args(["/path/to/fake1.h5", "/path/to/fake2.h5"])
+ with TemporaryDirectoryChanger():
+ cc = CompareCases()
+ cc.addOptions()
+ cc.parse_args(["/path/to/fake1.h5", "/path/to/fake2.h5"])
- self.assertEqual(cc.name, "compare")
- self.assertIsNone(cc.args.timestepCompare)
- self.assertIsNone(cc.args.weights)
+ self.assertEqual(cc.name, "compare")
+ self.assertIsNone(cc.args.timestepCompare)
+ self.assertIsNone(cc.args.weights)
+
+ with self.assertRaises(ValueError):
+ # The "fake" files do exist, so this should fail.
+ cc.invoke()
class TestCompareSuites(unittest.TestCase):
def test_compareSuitesBasics(self):
- cs = CompareSuites()
- cs.addOptions()
- cs.parse_args(["/path/to/fake1.h5", "/path/to/fake2.h5"])
+ with TemporaryDirectoryChanger():
+ cs = CompareSuites()
+ cs.addOptions()
+ cs.parse_args(["/path/to/fake1.h5", "/path/to/fake2.h5", "-I"])
- self.assertEqual(cs.name, "compare-suites")
- self.assertEqual(cs.args.reference, "/path/to/fake1.h5")
- self.assertIsNone(cs.args.weights)
+ self.assertEqual(cs.name, "compare-suites")
+ self.assertEqual(cs.args.reference, "/path/to/fake1.h5")
+ self.assertTrue(cs.args.skip_inspection)
+ self.assertIsNone(cs.args.weights)
class TestExpandBlueprints(unittest.TestCase):
@@ -202,6 +260,8 @@ def test_expandBlueprintsBasics(self):
# Since the file is fake, invoke() should exit early.
with mockRunLogs.BufferLog() as mock:
+ runLog.LOG.startLog("test_expandBlueprintsBasics")
+ runLog.LOG.setVerbosity(logging.INFO)
self.assertEqual("", mock.getStdout())
ebp.invoke()
self.assertIn("does not exist", mock.getStdout())
@@ -209,18 +269,36 @@ def test_expandBlueprintsBasics(self):
class TestExtractInputs(unittest.TestCase):
def test_extractInputsBasics(self):
- ei = ExtractInputs()
- ei.addOptions()
- ei.parse_args(["/path/to/fake"])
-
- self.assertEqual(ei.name, "extract-inputs")
- self.assertEqual(ei.args.output_base, "/path/to/fake")
-
- with mockRunLogs.BufferLog() as mock:
- self.assertEqual("", mock.getStdout())
- with self.assertRaises(FileNotFoundError):
- # The "fake" file doesn't exist, so this should fail.
+ with TemporaryDirectoryChanger() as newDir:
+ # build test DB
+ o, r = loadTestReactor(
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
+ dbi = DatabaseInterface(r, o.cs)
+ dbPath = os.path.join(newDir.destination, f"{self._testMethodName}.h5")
+ dbi.initDB(fName=dbPath)
+ db = dbi.database
+ db.writeToDB(r)
+
+ # init the CLI
+ ei = ExtractInputs()
+ ei.addOptions()
+ ei.parse_args([dbPath])
+
+ # test the CLI initialization
+ self.assertEqual(ei.name, "extract-inputs")
+ self.assertEqual(ei.args.output_base, dbPath[:-3])
+
+ # run the CLI on a test DB, verify it worked via logging
+ with mockRunLogs.BufferLog() as mock:
+ runLog.LOG.startLog("test_extractInputsBasics")
+ runLog.LOG.setVerbosity(logging.INFO)
+ self.assertEqual("", mock.getStdout())
ei.invoke()
+ self.assertIn("Writing settings to", mock.getStdout())
+ self.assertIn("Writing blueprints to", mock.getStdout())
+
+ db.close()
class TestInjectInputs(unittest.TestCase):
@@ -238,6 +316,8 @@ def test_injectInputsInvokeIgnore(self):
ii.parse_args(["/path/to/fake.h5"])
with mockRunLogs.BufferLog() as mock:
+ runLog.LOG.startLog("test_injectInputsInvokeIgnore")
+ runLog.LOG.setVerbosity(logging.INFO)
self.assertEqual("", mock.getStdout())
ii.invoke()
self.assertIn("No settings", mock.getStdout())
@@ -252,11 +332,9 @@ def test_injectInputsInvokeNoData(self):
ii.parse_args(["/path/to/fake.h5", "--blueprints", bp])
# invoke and check log
- with mockRunLogs.BufferLog() as mock:
- self.assertEqual("", mock.getStdout())
- with self.assertRaises(FileNotFoundError):
- # The "fake.h5" doesn't exist, so this should fail.
- ii.invoke()
+ with self.assertRaises(FileNotFoundError):
+ # The "fake.h5" doesn't exist, so this should fail.
+ ii.invoke()
class TestMigrateInputs(unittest.TestCase):
@@ -303,19 +381,76 @@ def test_modifyCaseSettingsCommandInvoke(self):
class TestReportsEntryPoint(unittest.TestCase):
- def test_reportsEntryPointBasics(self):
+ def test_toTwoTuple(self):
+ result = ReportsEntryPoint.toTwoTuple("(1,2)")
+ self.assertEqual(result, (1, 2))
+
+ result = ReportsEntryPoint.toTwoTuple("(-931,223)")
+ self.assertEqual(result, (-931, 223))
+
+ result = ReportsEntryPoint.toTwoTuple("(-7,7")
+ self.assertEqual(result, (-7, 7))
+
+        # edge case: extra values beyond the first two are silently dropped
+ result = ReportsEntryPoint.toTwoTuple("(1,2,3)")
+ self.assertEqual(result, (1, 2))
+
+ # test some cases that SHOULD fail
+ with self.assertRaises(ValueError):
+ ReportsEntryPoint.toTwoTuple("(1,)")
+
+ with self.assertRaises(ValueError):
+ ReportsEntryPoint.toTwoTuple("()")
+
+ with self.assertRaises(ValueError):
+ ReportsEntryPoint.toTwoTuple("[1,5]")
+
+ def test_cleanArgs(self):
rep = ReportsEntryPoint()
rep.addOptions()
- rep.parse_args(["-h5db", "/path/to/fake.h5"])
- self.assertEqual(rep.name, "report")
- self.assertEqual(rep.settingsArgument, "optional")
+ node0 = "(0,0)"
+ node3 = "(3,3)"
+ nodesStr = "(0,2)(1,3)(2,9)"
- with mockRunLogs.BufferLog() as mock:
- self.assertEqual("", mock.getStdout())
- with self.assertRaises(ValueError):
- # The "fake.h5" doesn't exist, so this should fail.
- rep.invoke()
+ rep.parse_args(["--nodes", nodesStr])
+ self.assertEqual(rep.args.nodes, nodesStr)
+ rep._cleanArgs()
+ self.assertEqual(rep.args.nodes[0], (0, 2))
+ self.assertEqual(rep.args.nodes[1], (1, 3))
+ self.assertEqual(rep.args.nodes[2], (2, 9))
+
+ rep.parse_args(["--min-node", node0])
+ self.assertEqual(rep.args.min_node, node0)
+ rep._cleanArgs()
+ self.assertEqual(rep.args.min_node, (0, 0))
+
+ rep.parse_args(["--max-node", node3])
+ self.assertEqual(rep.args.max_node, node3)
+ rep._cleanArgs()
+ self.assertEqual(rep.args.max_node, (3, 3))
+
+ def test_reportsEntryPointBasics(self):
+ with TemporaryDirectoryChanger() as newDir:
+ # set up output names
+ fileNameDB = buildTestDB(self._testMethodName, 1, 1)
+ outputFile = f"{self._testMethodName}.txt"
+ outDir = os.path.join(newDir.destination, "reportsOutputFiles")
+
+ # define report
+ rep = ReportsEntryPoint()
+ rep.addOptions()
+ rep.parse_args(["-h5db", fileNameDB, "-o", outputFile])
+
+ # validate report options
+ self.assertEqual(rep.name, "report")
+ self.assertEqual(rep.settingsArgument, "optional")
+
+ # Run report, and make sure there are output files
+ rep.invoke()
+ self.assertTrue(os.path.exists(os.path.join(outDir, "index.html")))
+ outFiles = glob(os.path.join(outDir, f"*{self._testMethodName}*"))
+ self.assertGreater(len(outFiles), 2)
class TestCompareIsotxsLibsEntryPoint(unittest.TestCase):
@@ -329,6 +464,10 @@ def test_compareIsotxsLibsBasics(self):
self.assertEqual(com.name, "diff-isotxs")
self.assertIsNone(com.settingsArgument)
+ with self.assertRaises(FileNotFoundError):
+ # The provided files don't exist, so this should fail.
+ com.invoke()
+
class TestRunEntryPoint(unittest.TestCase):
def test_runEntryPointBasics(self):
@@ -363,17 +502,63 @@ class TestRunSuiteCommand(unittest.TestCase):
def test_runSuiteCommandBasics(self):
rs = RunSuiteCommand()
rs.addOptions()
- rs.parse_args(["/path/to/fake.yaml"])
+ rs.parse_args(["/path/to/fake.yaml", "-l"])
self.assertEqual(rs.name, "run-suite")
self.assertIsNone(rs.settingsArgument)
+ # test the invoke method
+ with mockRunLogs.BufferLog() as mock:
+ runLog.LOG.startLog("test_runSuiteCommandBasics")
+ runLog.LOG.setVerbosity(logging.INFO)
+ self.assertEqual("", mock.getStdout())
+ rs.invoke()
+ self.assertIn("Finding potential settings files", mock.getStdout())
+ self.assertIn("Checking for valid settings", mock.getStdout())
+ self.assertIn("Primary Log Verbosity", mock.getStdout())
+
class TestVisFileEntryPointCommand(unittest.TestCase):
def test_visFileEntryPointBasics(self):
- vf = VisFileEntryPoint()
- vf.addOptions()
- vf.parse_args(["/path/to/fake.h5"])
+ with TemporaryDirectoryChanger() as newDir:
+ # build test DB
+ self.o, self.r = loadTestReactor(
+ TEST_ROOT,
+ customSettings={"reloadDBName": "reloadingDB.h5"},
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml",
+ )
+ reduceTestReactorRings(self.r, self.o.cs, maxNumRings=2)
+ self.dbi = DatabaseInterface(self.r, self.o.cs)
+ dbPath = os.path.join(newDir.destination, f"{self._testMethodName}.h5")
+ self.dbi.initDB(fName=dbPath)
+ self.db = self.dbi.database
+ self.db.writeToDB(self.r)
+
+ # create Viz entry point
+ vf = VisFileEntryPoint()
+ vf.addOptions()
+ vf.parse_args([dbPath])
+
+ self.assertEqual(vf.name, "vis-file")
+ self.assertIsNone(vf.settingsArgument)
+
+ # test the invoke method
+ with mockRunLogs.BufferLog() as mock:
+ runLog.LOG.startLog("test_visFileEntryPointBasics")
+ runLog.LOG.setVerbosity(logging.INFO)
+ self.assertEqual("", mock.getStdout())
+
+ vf.invoke()
+
+ desired = "Creating visualization file for cycle 0, time node 0..."
+ self.assertIn(desired, mock.getStdout())
+
+ # test the parse method (using the same DB to save time)
+ vf = VisFileEntryPoint()
+ vf.parse([dbPath])
+ self.assertIsNone(vf.args.nodes)
+ self.assertIsNone(vf.args.min_node)
+ self.assertIsNone(vf.args.max_node)
+ self.assertEqual(vf.args.output_name, "test_visFileEntryPointBasics")
- self.assertEqual(vf.name, "vis-file")
- self.assertIsNone(vf.settingsArgument)
+ self.db.close()
diff --git a/armi/context.py b/armi/context.py
index 477a7da1a..71bb5b9bb 100644
--- a/armi/context.py
+++ b/armi/context.py
@@ -15,10 +15,8 @@
"""
Module containing global constants that reflect the executing context of ARMI.
-This contains information about the circumstatces under which an ARMI application is
-running. Things like the MPI environment, executing user, etc. live here. These are
-re-exported by the `armi` package, but live here so that import loops won't lead to
-as many issues.
+ARMI's global state information: operating system information, environment data, user data, memory
+parallelism, temporary storage locations, and the operational mode (interactive, GUI, or batch).
"""
from logging import DEBUG
import datetime
@@ -29,14 +27,8 @@
import sys
import time
-# h5py needs to be imported here, so that the disconnectAllHdfDBs() call that gets bound
-# to atexit below doesn't lead to a segfault on python exit. The Database3 module is
-# imported at call time, since it itself needs stuff that is initialized in this module
-# to import properly. However, if that import leads to the first time that h5py is
-# imported in this process, doing so will cause a segfault. The theory here is that this
-# happens because the h5py extension module is not safe to import (for whatever reason)
-# when the python interpreter is in whatever state it's in when the atexit callbacks are
-# being invoked. Importing early avoids this.
+# h5py needs to be imported here, so that the disconnectAllHdfDBs() call that gets bound to atexit
+# below doesn't lead to a segfault on python exit.
#
# Minimal code to reproduce the issue:
#
@@ -47,31 +39,25 @@
#
# >>> atexit.register(willSegFault)
-import h5py # noqa: unused-import
+import h5py # noqa: F401
BLUEPRINTS_IMPORTED = False
BLUEPRINTS_IMPORT_CONTEXT = ""
-# App name is used when spawning new tasks that should invoke a specific ARMI
-# application. For instance, the framework provides some features to help with
-# submitting tasks to an HPC cluster. Sometimes these tasks are themselves only using
-# ARMI functionality, so running `python -m armi` is fine. Other times, the task is
-# specific to an application, requiring something like `python -m myArmiApp`
+# App name is used when spawning new tasks that should invoke a specific ARMI application. Sometimes
+# these tasks only use ARMI functionality, so running `python -m armi` is fine. Other times, the
+# task is specific to an application, requiring something like: `python -m myArmiApp`
APP_NAME = "armi"
class Mode(enum.Enum):
"""
- Mode represents different run modes possible in ARMI.
+ Mode represents different run types possible in ARMI.
- The modes can be Batch, Interactive, or GUI. In different modes, there are different
- types of interactions possible.
-
- Mode is generally auto-detected based on your terminal. It can also be set in
- various CLI entry points, which are the implementations of
- :py:class:`armi.cli.entryPoint.EntryPoint`. Lastly, each entry point has a
- ``--batch`` command line argument that can force Batch mode.
+ The modes can be Batch, Interactive, or GUI. Mode is generally auto-detected based on your
+ terminal. It can also be set in various CLI entry points. Each entry point has a ``--batch``
+ command line argument that can force Batch mode.
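+
+    For example, any entry point can be forced into batch mode from the command line (the input
+    file name is illustrative)::
+
+        python -m armi run anInputFile.yaml --batch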
"""
BATCH = 1
@@ -93,29 +79,30 @@ def setMode(cls, mode):
USER = getpass.getuser()
START_TIME = time.ctime()
-# Set batch mode if not a TTY, which means you're on a cluster writing to a stdout file
-# In this mode you cannot respond to prompts or anything
-CURRENT_MODE = Mode.INTERACTIVE if sys.stdout.isatty() else Mode.BATCH
+# Set batch mode if not a TTY, which means you're on a cluster writing to a stdout file. In this
+# mode you cannot respond to prompts. (This does not work reliably for both Windows and Linux so an
+# OS-specific solution is applied.)
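+# (Note: the "win" substring check below also matches "darwin", so macOS uses the stdout check.)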
+isatty = sys.stdout.isatty() if "win" in sys.platform else sys.stdin.isatty()
+CURRENT_MODE = Mode.INTERACTIVE if isatty else Mode.BATCH
Mode.setMode(CURRENT_MODE)
MPI_COMM = None
# MPI_RANK represents the index of the CPU that is running.
# 0 is typically the primary CPU, while 1+ are typically workers.
-# MPI_SIZE is the total number of CPUs
MPI_RANK = 0
+# MPI_SIZE is the total number of CPUs.
MPI_SIZE = 1
-MPI_NODENAME = "local"
-MPI_NODENAMES = ["local"]
+LOCAL = "local"
+MPI_NODENAME = LOCAL
+MPI_NODENAMES = [LOCAL]
try:
- # Check for MPI
- # The mpi4py module uses cPickle to serialize python objects in preparation for
- # network transmission. Sometimes, when cPickle fails, it gives very cryptic error
- # messages that do not help much. If you uncomment th following line, you can trick
- # mpi4py into using the pure-python pickle module in place of cPickle and now you
- # will generally get much more meaningful and useful error messages Then comment it
- # back out because it's slow.
+ # Check for MPI. The mpi4py module uses cPickle to serialize python objects in preparation for
+ # network transmission. Sometimes, when cPickle fails, it gives very cryptic error messages that
+    # do not help much. If you uncomment the following line, you can trick mpi4py into using the
+    # pure-python pickle module in place of cPickle, and you will generally get much more
+    # meaningful and useful error messages. Then comment it back out because it's slow.
# import sys, pickle; sys.modules['cPickle'] = pickle
from mpi4py import MPI
@@ -129,11 +116,11 @@ def setMode(cls, mode):
pass
try:
- # trying a windows approach
+ # trying a Windows approach
APP_DATA = os.path.join(os.environ["APPDATA"], "armi")
APP_DATA = APP_DATA.replace("/", "\\")
-except: # noqa: bare-except
- # non-windows
+except Exception:
+ # non-Windows
APP_DATA = os.path.expanduser("~/.armi")
if MPI_NODENAMES.index(MPI_NODENAME) == MPI_RANK:
@@ -146,13 +133,14 @@ def setMode(cls, mode):
raise OSError("Directory doesn't exist {0}".format(APP_DATA))
if MPI_COMM is not None:
- MPI_COMM.barrier() # make sure app data exists before workers proceed.
+ # Make sure app data exists before workers proceed.
+ MPI_COMM.barrier()
MPI_DISTRIBUTABLE = MPI_SIZE > 1
_FAST_PATH = os.path.join(os.getcwd())
"""
-A directory available for high-performance I/O
+A directory available for high-performance I/O.
.. warning:: This is not a constant and can change at runtime.
"""
@@ -165,17 +153,16 @@ def activateLocalFastPath() -> None:
"""
Specify a local temp directory to be the fast path.
- ``FAST_PATH`` is often a local hard drive on a cluster node. It's a high-performance
+ ``FAST_PATH`` is often a local hard drive on a cluster node. It should be a high-performance
scratch space. Different processors on the same node should have different fast paths.
- Some old code may MPI_RANK-dependent folders/filenames as well, but this is no longer
- necessary.
- .. warning:: This path will be obliterated when the job ends so be careful.
+ Notes
+ -----
+ This path will be obliterated when the job ends.
- Note also
- that this path is set at import time, so if a series of unit tests come through that
- instantiate one operator after the other, the path will already exist the second time.
- The directory is created in the Operator constructor.
+ This path is set at import time, so if a series of unit tests come through that instantiate one
+ operator after the other, the path will already exist the second time. The directory is created
+ in the Operator constructor.
"""
global _FAST_PATH, _FAST_PATH_IS_TEMPORARY, APP_DATA
@@ -201,9 +188,8 @@ def getFastPath() -> str:
Notes
-----
- It's too dangerous to use ``FAST_PATH`` directly as it can change between import and
- runtime. For example, a module that does ``from armi.context import FAST_PATH`` is
- disconnected from the official ``FAST_PATH`` controlled by this module.
+    This exists because it's dangerous to use ``FAST_PATH`` directly, as it can change between
+ import and runtime.
"""
return _FAST_PATH
@@ -212,19 +198,17 @@ def cleanTempDirs(olderThanDays=None):
"""
Clean up temporary files after a run.
- The Windows HPC system sends a SIGBREAK signal when the user cancels a job, which
- is NOT handled by ``atexit``. Notably SIGBREAK doesn't exist off Windows.
- For the SIGBREAK signal to work with a Microsoft HPC, the ``TaskCancelGracePeriod``
- option must be configured to be non-zero. This sets the period between SIGBREAK
- and SIGTERM/SIGINT. To do cleanups in this case, we must use the ``signal`` module.
- Actually, even then it does not work because MS ``mpiexec`` does not pass signals
- through.
+ Some Windows HPC systems send a SIGBREAK signal when the user cancels a job, which is NOT
+ handled by ``atexit``. Notably, SIGBREAK does not exist outside Windows. For the SIGBREAK signal
+    to work with a Windows HPC, the ``TaskCancelGracePeriod`` option must be configured to be
+    nonzero. This sets the period between SIGBREAK and SIGTERM/SIGINT. To do cleanups in this
+    case, we must use the ``signal`` module. Actually, even then it does not work because MS
+    ``mpiexec`` does not pass signals through.
Parameters
----------
olderThanDays: int, optional
- If provided, deletes other ARMI directories if they are older than the requested
- time.
+ If provided, deletes other ARMI directories if they are older than the requested time.
"""
from armi import runLog
from armi.utils.pathTools import cleanPath
@@ -254,12 +238,14 @@ def cleanTempDirs(olderThanDays=None):
def cleanAllArmiTempDirs(olderThanDays: int) -> None:
"""
- Delete all ARMI-related files from other unrelated runs after `olderThanDays` days (in
- case this failed on earlier runs).
-
- .. warning:: This will break any concurrent runs that are still running.
+ Delete all ARMI-related files from other unrelated runs after `olderThanDays` days (in case this
+ failed on earlier runs).
This is a useful utility in HPC environments when some runs crash sometimes.
+
+ Warning
+ -------
+ This will break any concurrent runs that are still running.
"""
from armi.utils.pathTools import cleanPath
@@ -279,7 +265,7 @@ def cleanAllArmiTempDirs(olderThanDays: int) -> None:
if runIsOldAndLikleyComplete or fromThisRun:
# Delete old files
cleanPath(dirPath, mpiRank=MPI_RANK)
- except: # noqa: bare-except
+ except Exception:
pass
@@ -289,14 +275,12 @@ def disconnectAllHdfDBs() -> None:
Notes
-----
- This is a hack to help ARMI exit gracefully when the garbage collector and h5py have
- issues destroying objects. After lots of investigation, the root cause for why this
- was having issues was never identified. It appears that when several HDF5 files are
- open in the same run (e.g. when calling armi.init() multiple times from a
- post-processing script), when these h5py File objects were closed, the garbage
- collector would raise an exception related to the repr'ing the object. We
- get around this by using the garbage collector to manually disconnect all open HdfDB
- objects.
+ This is a hack to help ARMI exit gracefully when the garbage collector and h5py have issues
+ destroying objects. The root cause for why this was having issues was never identified. It
+ appears that when several HDF5 files are open in the same run (e.g. when calling ``armi.init()``
+ multiple times from a post-processing script), when these h5py File objects were closed, the
+    garbage collector would raise an exception related to repr'ing the object. We get around
+ this by using the garbage collector to manually disconnect all open HdfDBs.
"""
from armi.bookkeeping.db import Database3
diff --git a/armi/interfaces.py b/armi/interfaces.py
index 2855274b4..ee1c360ea 100644
--- a/armi/interfaces.py
+++ b/armi/interfaces.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-r"""
+"""
Interfaces are objects of code that interact with ARMI. They read information off the state,
perform calculations (or run external codes), and then store the results back in the state.
@@ -29,7 +29,7 @@
from typing import List
from typing import Dict
-import numpy
+import numpy as np
from numpy.linalg import norm
from armi import getPluginManagerOrFail, settings, utils
@@ -38,27 +38,27 @@
from armi.utils import textProcessors
-class STACK_ORDER: # noqa: invalid-class-name
+class STACK_ORDER: # noqa: N801
"""
Constants that help determine the order of modules in the interface stack.
- Each module defines an ``ORDER`` constant that specifies where in this order it
- should be placed in the Interface Stack.
+ Each module defines an ``ORDER`` constant that specifies where in this order it should be placed
+ in the Interface Stack.
.. impl:: Define an ordered list of interfaces.
:id: I_ARMI_OPERATOR_INTERFACES0
:implements: R_ARMI_OPERATOR_INTERFACES
- At each time node during a simulation, an ordered colletion of Interfaces
- are run (referred to as the interface stack). But ARMI does not force the order upon the analyst.
- Instead, each Interface registers where in that ordered list it belongs by
- giving itself an order number (which can be an integer or a decimal).
- This class defines a set of constants which can be imported and used
- by Interface developers to define that Interface's position in the stack.
+        At each time node during a simulation, an ordered collection of Interfaces is run (referred
+ to as the interface stack). But ARMI does not force the order upon the analyst. Instead,
+ each Interface registers where in that ordered list it belongs by giving itself an order
+ number (which can be an integer or a decimal). This class defines a set of constants which
+ can be imported and used by Interface developers to define that Interface's position in the
+ stack.
- The constants defined are given names, based on common stack orderings
- in the ARMI ecosystem. But in the end, these are just constant values,
- and the names they are given are merely suggestions.
+ The constants defined are given names, based on common stack orderings in the ARMI
+ ecosystem. But in the end, these are just constant values, and the names they are given are
+ merely suggestions.
See Also
--------
@@ -118,7 +118,7 @@ class TightCoupler:
Maximum number of tight coupling iterations allowed
"""
- _SUPPORTED_TYPES = [float, int, list, numpy.ndarray]
+ _SUPPORTED_TYPES = [float, int, list, np.ndarray]
def __init__(self, param, tolerance, maxIters):
self.parameter = param
@@ -126,7 +126,7 @@ def __init__(self, param, tolerance, maxIters):
self.maxIters = maxIters
self._numIters = 0
self._previousIterationValue = None
- self.eps = numpy.inf
+ self.eps = np.inf
def __repr__(self):
return (
@@ -158,12 +158,14 @@ def storePreviousIterationValue(self, val: _SUPPORTED_TYPES):
def isConverged(self, val: _SUPPORTED_TYPES) -> bool:
"""
- Return boolean indicating if the convergence criteria between the current and previous iteration values are met.
+ Return boolean indicating if the convergence criteria between the current and previous
+ iteration values are met.
Parameters
----------
val : _SUPPORTED_TYPES
- the most recent value for computing convergence critera. Is commonly equal to interface.getTightCouplingValue()
+            The most recent value for computing convergence criteria. This is commonly equal to
+            interface.getTightCouplingValue().
Returns
-------
@@ -172,18 +174,19 @@ def isConverged(self, val: _SUPPORTED_TYPES) -> bool:
Notes
-----
- - On convergence, this class is automatically reset to its initial condition to avoid retaining
- or holding a stale state. Calling this method will increment a counter that when exceeded will
- clear the state. A warning will be reported if the state is cleared prior to the convergence
- criteria being met.
- - For computing convergence of arrays, only up to 2D is allowed. 3D arrays would arise from considering
- component level parameters. However, converging on component level parameters is not supported at this time.
+ - On convergence, this class is automatically reset to its initial condition to avoid
+          retaining a stale state. Calling this method increments a counter that, when exceeded,
+          clears the state. A warning will be reported if the state is cleared prior to
+ the convergence criteria being met.
+ - For computing convergence of arrays, only up to 2D is allowed. 3D arrays would arise from
+ considering component level parameters. However, converging on component level parameters
+ is not supported at this time.
Raises
------
ValueError
- If the previous iteration value has not been assigned. The ``storePreviousIterationValue`` method
- must be called first.
+ If the previous iteration value has not been assigned. The
+ ``storePreviousIterationValue`` method must be called first.
RuntimeError
Only support calculating norms for up to 2D arrays.
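+
+        Examples
+        --------
+        A minimal scalar sketch (the parameter name is illustrative)::
+
+            coupler = TightCoupler("keff", tolerance=1.0e-5, maxIters=10)
+            coupler.storePreviousIterationValue(1.0)
+            converged = coupler.isConverged(1.0)  # eps falls below tolerance, so True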
"""
@@ -201,19 +204,19 @@ def isConverged(self, val: _SUPPORTED_TYPES) -> bool:
else:
dim = self.getListDimension(val)
if dim == 1: # 1D array
- self.eps = norm(numpy.subtract(val, previous), ord=2)
+ self.eps = norm(np.subtract(val, previous), ord=2)
elif dim == 2: # 2D array
epsVec = []
for old, new in zip(previous, val):
- epsVec.append(norm(numpy.subtract(old, new), ord=2))
- self.eps = norm(epsVec, ord=numpy.inf)
+ epsVec.append(norm(np.subtract(old, new), ord=2))
+ self.eps = norm(epsVec, ord=np.inf)
else:
raise RuntimeError(
"Currently only support up to 2D arrays for calculating convergence of arrays."
)
- # Check if convergence is satisfied. If so, or if reached max number of iters, then
- # reset the number of iterations
+ # Check if convergence is satisfied. If so, or if reached max number of iters, then reset
+ # the number of iterations
converged = self.eps < self.tolerance
if converged:
self._numIters = 0
@@ -285,16 +288,15 @@ def getInputFiles(cls, cs):
name: Union[str, None] = None
"""
- The name of the interface. This is undefined for the base class, and must be
- overridden by any concrete class that extends this one.
+ The name of the interface. This is undefined for the base class, and must be overridden by any
+ concrete class that extends this one.
"""
# TODO: This is a terrible name.
function = None
"""
- The function performed by an Interface. This is not required be be defined
- by implementations of Interface, but is used to form categories of
- interfaces.
+The function performed by an Interface. This is not required to be defined by implementations of
+ Interface, but is used to form categories of interfaces.
"""
class Distribute:
@@ -308,8 +310,8 @@ def __init__(self, r, cs):
"""
Construct an interface.
- The ``r`` and ``cs`` arguments are required, but may be ``None``, where
- appropriate for the specific ``Interface`` implementation.
+ The ``r`` and ``cs`` arguments are required, but may be ``None``, where appropriate for the
+ specific ``Interface`` implementation.
Parameters
----------
@@ -352,8 +354,8 @@ def distributable(self):
Notes
-----
- Cases where this isn't possible include the database interface,
- where the SQL driver cannot be distributed.
+ Cases where this isn't possible include the database interface, where the SQL driver cannot
+ be distributed.
"""
return self.Distribute.DUPLICATE
@@ -391,17 +393,20 @@ def attachReactor(self, o, r):
self.o = o
def detachReactor(self):
- """Delete the callbacks to reactor or operator. Useful when pickling, MPI sending, etc. to save memory."""
+ """Delete the callbacks to reactor or operator. Useful when pickling, MPI sending, etc. to
+ save memory.
+ """
self.o = None
self.r = None
self.cs = None
def duplicate(self):
"""
- Duplicate this interface without duplicating some of the large attributes (like the entire reactor).
+ Duplicate this interface without duplicating some of the large attributes (like the entire
+ reactor).
- Makes a copy of interface with detached reactor/operator/settings so that it can be attached to an operator
- at a later point in time.
+ Makes a copy of interface with detached reactor/operator/settings so that it can be attached
+ to an operator at a later point in time.
Returns
-------
@@ -461,9 +466,9 @@ def _initializeParams(self):
Notes
-----
- Parameters with defaults are not written to the database until they have been assigned SINCE_ANYTHING.
- This is done to reduce database size, so that we don't write parameters to the DB that are related to
- interfaces that are not not active.
+ Parameters with defaults are not written to the database until they have been assigned
+ SINCE_ANYTHING. This is done to reduce database size, so that we don't write parameters to
+        the DB that are related to interfaces that are not active.
"""
for paramDef in parameters.ALL_DEFINITIONS.inCategory(self.name):
if paramDef.default not in (None, parameters.NoDefault):
@@ -507,11 +512,11 @@ def isRequestedDetailPoint(self, cycle=None, node=None):
Notes
-----
- By default, detail points are either during the requested snapshots,
- if any exist, or all cycles and nodes if none exist.
+ By default, detail points are either during the requested snapshots, if any exist, or all
+ cycles and nodes if none exist.
- This is useful for peripheral interfaces (CR Worth, perturbation theory, transients)
- that may or may not be requested during a standard run.
+ This is useful for peripheral interfaces (CR Worth, perturbation theory, transients) that
+ may or may not be requested during a standard run.
If both cycle and node are None, this returns True
@@ -557,12 +562,11 @@ def workerOperate(self, _cmd):
return False
def enabled(self, flag=None):
- r"""
+ """
Mechanism to allow interfaces to be attached but not running at the interaction points.
- Must be implemented on the individual interface level hooks.
- If given no arguments, returns status of enabled
- If arguments, sets enabled to that flag. (True or False)
+        Must be implemented on the individual interface-level hooks. If given no arguments, this
+        returns the enabled status. If given an argument, it sets enabled to that flag (True or
+        False).
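+
+        A sketch of the two call patterns (``iface`` is any Interface instance)::
+
+            iface.enabled()       # no argument: query the enabled status
+            iface.enabled(False)  # with an argument: disable the interface
+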
Notes
-----
@@ -576,7 +580,7 @@ def enabled(self, flag=None):
raise ValueError("Non-bool passed to assign {}.enable().".format(self))
def bolForce(self, flag=None):
- r"""
+ """
Run interactBOL even if this interface is disabled.
Parameters
@@ -610,26 +614,25 @@ def specifyInputs(cs) -> Dict[Union[str, settings.Setting], List[str]]:
"""
Return a collection of file names that are considered input files.
- This is a static method (i.e. is not called on a particular instance of the
- class), since it should not require an Interface to actually be constructed.
- This would require constructing a reactor object, which is expensive.
+ This is a static method (i.e. is not called on a particular instance of the class), since it
+ should not require an Interface to actually be constructed. This would require constructing
+ a reactor object, which is expensive.
- The files returned by an implementation should be those that one would want
- copied to a target location when cloning a Case or CaseSuite. These can be
- absolute paths, relative paths, or glob patterns that will be interpolated
- relative to the input directory. Absolute paths will not be copied anywhere.
+ The files returned by an implementation should be those that one would want copied to a
+ target location when cloning a Case or CaseSuite. These can be absolute paths, relative
+ paths, or glob patterns that will be interpolated relative to the input directory. Absolute
+ paths will not be copied anywhere.
- The returned dictionary will enable the source Settings object to
- be updated to the new file location. While the dictionary keys are
- recommended to be Setting objects, the name of the setting as a string,
- e.g., "shuffleLogic", is still interpreted. If the string name does not
+ The returned dictionary will enable the source Settings object to be updated to the new file
+ location. While the dictionary keys are recommended to be Setting objects, the name of the
+ setting as a string, e.g., "shuffleLogic", is still interpreted. If the string name does not
point to a valid setting then this will lead to a failure.
Note
----
- This existed before the advent of ARMI plugins. Perhaps it can be better served
- as a plugin hook. Potential future work.
+ This existed before the advent of ARMI plugins. Perhaps it can be better served as a plugin
+ hook. Potential future work.
See Also
--------
@@ -679,8 +682,8 @@ class OutputReader:
Notes
-----
- Should ideally not require r, eci, and fname arguments
- and would rather just have an apply(reactor) method.
+ Should ideally not require r, eci, and fname arguments and would rather just have an
+ apply(reactor) method.
"""
def __init__(self, r=None, externalCodeInterface=None, fName=None, cs=None):
diff --git a/armi/materials/__init__.py b/armi/materials/__init__.py
index 84fd7028f..e8f86261d 100644
--- a/armi/materials/__init__.py
+++ b/armi/materials/__init__.py
@@ -171,7 +171,7 @@ def resolveMaterialClassByName(name: str, namespaceOrder: List[str] = None):
Returns
-------
- matCls : Material
+ matCls : armi.materials.material.Material
The material
Raises
diff --git a/armi/materials/concrete.py b/armi/materials/concrete.py
index dced8404b..38e28724f 100644
--- a/armi/materials/concrete.py
+++ b/armi/materials/concrete.py
@@ -15,9 +15,8 @@
"""
Concrete.
-Concrete is often used to provide structural support of nuclear equipment.
-
-It can also provide radiation shielding.
+Concrete is often used to provide structural support of nuclear equipment. It can also provide
+radiation shielding.
"""
from armi.materials.material import Material
@@ -26,20 +25,20 @@
class Concrete(Material):
"""Simple concreate material.
- http://jolissrch-inter.tokai-sc.jaea.go.jp/pdfdata/JAERI-Data-Code-98-004.pdf
+ https://web.archive.org/web/20221103120449/https://physics.nist.gov/cgi-bin/Star/compos.pl?matno=144
"""
def setDefaultMassFracs(self):
- self.setMassFrac("H", 0.023 / 2.302)
- self.setMassFrac("O16", 1.220 / 2.302)
- self.setMassFrac("C", 0.0023 / 2.302)
- self.setMassFrac("NA23", 0.0368 / 2.302)
- self.setMassFrac("MG", 0.005 / 2.302)
- self.setMassFrac("AL", 0.078 / 2.302)
- self.setMassFrac("SI", 0.775 / 2.302)
- self.setMassFrac("K", 0.0299 / 2.302)
- self.setMassFrac("CA", 0.100 / 2.302)
- self.setMassFrac("FE", 0.032 / 2.302)
+ self.setMassFrac("H", 0.010000)
+ self.setMassFrac("C", 0.001000)
+ self.setMassFrac("O16", 0.529107)
+ self.setMassFrac("NA23", 0.016000)
+ self.setMassFrac("MG", 0.002000)
+ self.setMassFrac("AL", 0.033872)
+ self.setMassFrac("SI", 0.337021)
+ self.setMassFrac("K", 0.013000)
+ self.setMassFrac("CA", 0.044000)
+ self.setMassFrac("FE", 0.014000)
def density(self, Tk=None, Tc=None):
- return 2.302 # g/cm3
+ return 2.3000 # g/cm3
diff --git a/armi/materials/inconel600.py b/armi/materials/inconel600.py
index a59ed7820..824b6ba01 100644
--- a/armi/materials/inconel600.py
+++ b/armi/materials/inconel600.py
@@ -13,7 +13,7 @@
# limitations under the License.
"""Inconel600."""
-import numpy
+import numpy as np
from armi.materials.material import Material
from armi.utils.units import getTc
@@ -39,8 +39,8 @@ class Inconel600(Material):
def __init__(self):
Material.__init__(self)
self.refDens = 8.47 # g/cc
- # Only density measurement presented in the reference.
- # Presumed to be performed at 21C since this was the reference temperature for linear expansion measurements.
+ # Only density measurement presented in the reference. Presumed to be performed at 21C since
+ # this was the reference temperature for linear expansion measurements.
def setDefaultMassFracs(self):
massFracs = {
@@ -73,7 +73,7 @@ def polyfitThermalConductivity(self, power=2):
"""
Tc = [20.0, 100.0, 200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0]
k = [14.9, 15.9, 17.3, 19.0, 20.5, 22.1, 23.9, 25.7, 27.5]
- return numpy.polyfit(numpy.array(Tc), numpy.array(k), power).tolist()
+ return np.polyfit(np.array(Tc), np.array(k), power).tolist()
def thermalConductivity(self, Tk=None, Tc=None):
r"""
@@ -113,7 +113,7 @@ def polyfitHeatCapacity(self, power=2):
"""
Tc = [20.0, 100.0, 200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0]
cp = [444.0, 465.0, 486.0, 502.0, 519.0, 536.0, 578.0, 595.0, 611.0, 628.0]
- return numpy.polyfit(numpy.array(Tc), numpy.array(cp), power).tolist()
+ return np.polyfit(np.array(Tc), np.array(cp), power).tolist()
def heatCapacity(self, Tk=None, Tc=None):
r"""
@@ -174,9 +174,7 @@ def polyfitLinearExpansionPercent(self, power=2):
Tc.insert(0, refTempC)
- return numpy.polyfit(
- numpy.array(Tc), numpy.array(linExpPercent), power
- ).tolist()
+ return np.polyfit(np.array(Tc), np.array(linExpPercent), power).tolist()
def linearExpansionPercent(self, Tk=None, Tc=None):
r"""
@@ -202,11 +200,12 @@ def linearExpansion(self, Tk=None, Tc=None):
r"""
From http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf.
- Using the correlation for linearExpansionPercent, the 2nd order polynomial is divided by 100 to convert
- from percent strain to strain, then differentiated with respect to temperature to find the correlation
- for instantaneous linear expansion.
+ Using the correlation for linearExpansionPercent, the 2nd order polynomial is divided by 100
+ to convert from percent strain to strain, then differentiated with respect to temperature to
+ find the correlation for instantaneous linear expansion.
- i.e. for a linearExpansionPercent correlation of a*Tc**2 + b*Tc + c, the linearExpansion correlation is 2*a/100*Tc + b/100
+ i.e. for a linearExpansionPercent correlation of a*Tc**2 + b*Tc + c, the linearExpansion
+ correlation is 2*a/100*Tc + b/100
2*(3.722e-7/100.0)*Tc + 1.303e-3/100.0
diff --git a/armi/materials/inconel625.py b/armi/materials/inconel625.py
index 1328c18dd..1c251bf91 100644
--- a/armi/materials/inconel625.py
+++ b/armi/materials/inconel625.py
@@ -13,7 +13,7 @@
# limitations under the License.
"""Inconel625."""
-import numpy
+import numpy as np
from armi.materials.material import Material
from armi.utils.units import getTc
@@ -78,7 +78,7 @@ def polyfitThermalConductivity(self, power=2):
"""
Tc = [21.0, 38.0, 93.0, 204.0, 316.0, 427.0, 538.0, 649.0, 760.0, 871.0, 982.0]
k = [9.8, 10.1, 10.8, 12.5, 14.1, 15.7, 17.5, 19.0, 20.8, 22.8, 25.2]
- return numpy.polyfit(numpy.array(Tc), numpy.array(k), power).tolist()
+ return np.polyfit(np.array(Tc), np.array(k), power).tolist()
def thermalConductivity(self, Tk=None, Tc=None):
r"""
@@ -142,7 +142,7 @@ def polyfitHeatCapacity(self, power=2):
645.0,
670.0,
]
- return numpy.polyfit(numpy.array(Tc), numpy.array(cp), power).tolist()
+ return np.polyfit(np.array(Tc), np.array(cp), power).tolist()
def heatCapacity(self, Tk=None, Tc=None):
"""
@@ -203,9 +203,7 @@ def polyfitLinearExpansionPercent(self, power=2):
Tc.insert(0, refTempC)
- return numpy.polyfit(
- numpy.array(Tc), numpy.array(linExpPercent), power
- ).tolist()
+ return np.polyfit(np.array(Tc), np.array(linExpPercent), power).tolist()
def linearExpansionPercent(self, Tk=None, Tc=None):
"""
@@ -231,11 +229,12 @@ def linearExpansion(self, Tk=None, Tc=None):
r"""
From http://www.specialmetals.com/assets/documents/alloys/inconel/inconel-alloy-625.pdf.
- Using the correlation for linearExpansionPercent, the 2nd order polynomial is divided by 100 to convert
- from percent strain to strain, then differentiated with respect to temperature to find the correlation
- for instantaneous linear expansion.
+ Using the correlation for linearExpansionPercent, the 2nd order polynomial is divided by 100
+ to convert from percent strain to strain, then differentiated with respect to temperature to
+ find the correlation for instantaneous linear expansion.
- i.e. for a linearExpansionPercent correlation of a*Tc**2 + b*Tc + c, the linearExpansion correlation is 2*a/100*Tc + b/100
+ i.e. for a linearExpansionPercent correlation of a*Tc**2 + b*Tc + c, the linearExpansion
+ correlation is 2*a/100*Tc + b/100
2*(5.083e-7/100.0)*Tc + 1.125e-3/100.0
diff --git a/armi/materials/inconelX750.py b/armi/materials/inconelX750.py
index b56e2e95d..a6a507d31 100644
--- a/armi/materials/inconelX750.py
+++ b/armi/materials/inconelX750.py
@@ -13,10 +13,10 @@
# limitations under the License.
"""Inconel X750."""
-import numpy
+import numpy as np
-from armi.utils.units import getTc
from armi.materials.material import Material
+from armi.utils.units import getTc
class InconelX750(Material):
@@ -40,7 +40,8 @@ def __init__(self):
Material.__init__(self)
self.refDens = 8.28 # g/cc
# Only density measurement presented in the reference.
- # Presumed to be performed at 21C since this was the reference temperature for linear expansion measurements.
+ # Presumed to be performed at 21C since this was the reference temperature for linear
+ # expansion measurements.
def setDefaultMassFracs(self):
massFracs = {
@@ -62,8 +63,8 @@ def setDefaultMassFracs(self):
def polyfitThermalConductivity(self, power=2):
r"""
- Calculates the coefficients of a polynomial fit for thermalConductivity.
- Based on data from http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf
+ Calculates the coefficients of a polynomial fit for thermalConductivity. Based on data from
+ https://web.archive.org/web/20170215105917/http://www.specialmetals.com:80/documents/Inconel%20alloy%20X-750.pdf
Fits a polynomial to the data set and returns the coefficients.
Parameters
@@ -103,7 +104,7 @@ def polyfitThermalConductivity(self, power=2):
22.21,
23.65,
]
- return numpy.polyfit(numpy.array(Tc), numpy.array(k), power).tolist()
+ return np.polyfit(np.array(Tc), np.array(k), power).tolist()
def thermalConductivity(self, Tk=None, Tc=None):
r"""
@@ -143,7 +144,7 @@ def polyfitHeatCapacity(self, power=3):
"""
Tc = [21.1, 93.3, 204.4, 315.6, 426.7, 537.8, 648.9, 760.0, 871.1]
cp = [431.2, 456.4, 485.7, 502.4, 523.4, 544.3, 573.6, 632.2, 715.9]
- return numpy.polyfit(numpy.array(Tc), numpy.array(cp), power).tolist()
+ return np.polyfit(np.array(Tc), np.array(cp), power).tolist()
def heatCapacity(self, Tk=None, Tc=None):
r"""
@@ -206,9 +207,7 @@ def polyfitLinearExpansionPercent(self, power=2):
Tc.insert(0, refTempC)
- return numpy.polyfit(
- numpy.array(Tc), numpy.array(linExpPercent), power
- ).tolist()
+ return np.polyfit(np.array(Tc), np.array(linExpPercent), power).tolist()
def linearExpansionPercent(self, Tk=None, Tc=None):
r"""
@@ -234,11 +233,12 @@ def linearExpansion(self, Tk=None, Tc=None):
r"""
From http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf.
- Using the correlation for linearExpansionPercent, the 2nd order polynomial is divided by 100 to convert
- from percent strain to strain, then differentiated with respect to temperature to find the correlation
- for instantaneous linear expansion.
+ Using the correlation for linearExpansionPercent, the 2nd order polynomial is divided by 100
+ to convert from percent strain to strain, then differentiated with respect to temperature to
+ find the correlation for instantaneous linear expansion.
- i.e. for a linearExpansionPercent correlation of a*Tc**2 + b*Tc + c, the linearExpansion correlation is 2*a/100*Tc + b/100
+ i.e. for a linearExpansionPercent correlation of a*Tc**2 + b*Tc + c, the linearExpansion
+ correlation is 2*a/100*Tc + b/100
2*(6.8378e-7/100.0)*Tc + 1.056e-3/100.0
diff --git a/armi/materials/material.py b/armi/materials/material.py
index 9ba80ec59..362b66ece 100644
--- a/armi/materials/material.py
+++ b/armi/materials/material.py
@@ -17,10 +17,12 @@
Most temperatures may be specified in either K or C and the functions will convert for you.
"""
+import functools
+import traceback
import warnings
from scipy.optimize import fsolve
-import numpy
+import numpy as np
from armi import runLog
from armi.nucDirectory import nuclideBases
@@ -32,29 +34,54 @@
FAIL_ON_RANGE = False
-class Material:
+def parentAwareDensityRedirect(f):
+ """Wrap Material.density to warn people about potential problems.
+
+ If a Material is linked to a Component, ``Material.density`` may produce
+ different results from ``Component.density``. The component's density
+ is considered the source of truth because it incorporates changes in volume,
+ composition, and temperature in concert with the state of the reactor.
"""
+
+ @functools.wraps(f)
+ def inner(self: "Material", *args, **kwargs) -> float:
+ if self.parent is not None:
+ stack = traceback.extract_stack()
+ # last entry is here, second to last is what called this
+ caller = stack[-2]
+ label = f"Found call to Material.density in {caller.filename} at line {caller.lineno}"
+ runLog.warning(
+ f"{label}. Calls to Material.density when attached to a component have the potential to induce "
+ "subtle differences as Component.density and Material.density can diverge.",
+ single=True,
+ label=label,
+ )
+ return f(self, *args, **kwargs)
+
+ return inner
+
+
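The wrap-once mechanics here rely on functools.wraps setting __wrapped__ on the wrapper, which is exactly what the __init_subclass__ hook added further down checks before decorating. A minimal standalone sketch of the pattern (Toy and warnOnCall are illustrative names, not ARMI code):

    import functools

    def warnOnCall(f):
        @functools.wraps(f)  # sets inner.__wrapped__ = f
        def inner(self, *args, **kwargs):
            print(f"calling {f.__qualname__}")
            return f(self, *args, **kwargs)
        return inner

    class Toy:
        def __init_subclass__(cls) -> None:
            if not hasattr(cls.density, "__wrapped__"):  # wrap each class only once
                cls.density = warnOnCall(cls.density)

        def density(self):
            return 1.0

    class Child(Toy):
        def density(self):
            return 2.0

    Child().density()  # prints "calling Child.density" and returns 2.0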
+class Material:
+ r"""
A material is made up of elements or isotopes. It has bulk properties like density.
.. impl:: The abstract material class.
:id: I_ARMI_MAT_PROPERTIES
:implements: R_ARMI_MAT_PROPERTIES
- The ARMI Materials library is based on the Object-Oriented Programming design
- approach, and uses this generic ``Material`` base class. In this class we
- define a large number of material properties like density, heat capacity, or
- linear expansion coefficient. Specific materials then subclass this base class to
- assign particular values to those properties.
+ The ARMI Materials library is based on the Object-Oriented Programming design approach, and
+ uses this generic ``Material`` base class. In this class we define a large number of
+ material properties like density, heat capacity, or linear expansion coefficient. Specific
+ materials then subclass this base class to assign particular values to those properties.
.. impl:: Materials generate nuclide mass fractions at instantiation.
:id: I_ARMI_MAT_FRACS
:implements: R_ARMI_MAT_FRACS
- An ARMI material is meant to be able to represent real world materials that
- might be used in the construction of a nuclear reactor. As such, they are
- not just individual nuclides, but practical materials like a particular
- concrete, steel, or water. One of the main things that will be needed to
- describe such a material is the exact nuclide fractions. As such, the
+ An ARMI material is meant to be able to represent real world materials that might be used in
+ the construction of a nuclear reactor. As such, they are not just individual nuclides, but
+ practical materials like a particular concrete, steel, or water. One of the main things that
+ will be needed to describe such a material is the exact nuclide fractions. As such, the
constructor of every Material subclass attempts to set these mass fractions.
Attributes
@@ -64,13 +91,12 @@ class Material:
massFrac : dict
Mass fractions for all nuclides in the material keyed on the nuclide symbols
refDens : float
- A reference density used by some materials, for instance `SimpleSolid`s,
- during thermal expansion
+ A reference density used by some materials, for instance `SimpleSolid`\ s, during thermal
+ expansion
theoreticalDensityFrac : float
- Fraction of the material's density in reality, which is commonly different
- from 1.0 in solid materials due to the manufacturing process.
- Can often be set from the blueprints input via the TD_frac material modification.
- For programmatic setting, use `adjustTD()`.
+ Fraction of the material's density in reality, which is commonly different from 1.0 in solid
+ materials due to the manufacturing process. Can often be set from the blueprints input via
+ the TD_frac material modification. For programmatic setting, use `adjustTD()`.
Notes
-----
@@ -78,6 +104,11 @@ class Material:
for that material.
"""
+ def __init_subclass__(cls) -> None:
+ # Apply the density decorator to every subclass
+ if not hasattr(cls.density, "__wrapped__"):
+ cls.density = parentAwareDensityRedirect(cls.density)
+
DATA_SOURCE = "ARMI"
"""Indication of where the material is loaded from (may be plugin name)"""
@@ -122,10 +153,10 @@ def name(self):
:id: I_ARMI_MAT_NAME
:implements: R_ARMI_MAT_NAME
- Every instance of an ARMI material must have a simple, human-readable
- string name. And, if possible, we want this string to match the class
- name. (This, of course, puts some limits on both the string and the
- class name.) These names are easily retrievable as a class property.
+ Every instance of an ARMI material must have a simple, human-readable string name. And,
+ if possible, we want this string to match the class name. (This, of course, puts some
+ limits on both the string and the class name.) These names are easily retrievable as a
+ class property.
"""
return self._name
@@ -135,8 +166,8 @@ def name(self, nomen):
Warning
-------
- Some code in ARMI expects the "name" of a meterial matches its
- class name. So you use this method at your own risk.
+        Some code in ARMI expects that the "name" of a material matches its class name, so use
+        this method at your own risk.
See Also
--------
@@ -203,8 +234,7 @@ def linearExpansion(self, Tk: float = None, Tc: float = None) -> float:
"""
The instantaneous linear expansion coefficient (dL/L)/dT.
- This is used for reactivity coefficients, etc. but will not affect
- density or dimensions.
+ This is used for reactivity coefficients, etc. but will not affect density or dimensions.
See Also
--------
@@ -239,8 +269,7 @@ def linearExpansionPercent(self, Tk: float = None, Tc: float = None) -> float:
def linearExpansionFactor(self, Tc: float, T0: float) -> float:
"""
- Return a dL/L factor relative to T0 instead of the material-dependent reference
- temperature.
+ Return a dL/L factor relative to T0 instead of the material-dependent reference temperature.
Notes
-----
@@ -285,9 +314,8 @@ def setMassFrac(self, nucName: str, massFrac: float) -> None:
Notes
-----
- This will try to convert the provided ``massFrac`` into a float
- for assignment. If the conversion cannot occur then an error
- will be thrown.
+ This will try to convert the provided ``massFrac`` into a float for assignment. If the
+ conversion cannot occur then an error will be thrown.
"""
try:
massFrac = float(massFrac)
@@ -323,10 +351,11 @@ def adjustMassFrac(self, nuclideName: str, massFraction: float) -> None:
"""
Change the mass fraction of the specified nuclide.
- This adjusts the mass fraction of a specified nuclide relative to other nuclides of the same element. If there
- are no other nuclides within the element, then it is enriched relative to the entire material. For example,
- enriching U235 in UZr would enrich U235 relative to U238 and other naturally occurring uranium isotopes.
- Likewise, enriching ZR in UZr would enrich ZR relative to uranium.
+ This adjusts the mass fraction of a specified nuclide relative to other nuclides of the same
+ element. If there are no other nuclides within the element, then it is enriched relative to
+ the entire material. For example, enriching U235 in UZr would enrich U235 relative to U238
+ and other naturally occurring uranium isotopes. Likewise, enriching ZR in UZr would enrich
+ ZR relative to uranium.
The method maintains a constant number of atoms, and adjusts ``refDens`` accordingly.
@@ -349,8 +378,8 @@ def adjustMassFrac(self, nuclideName: str, massFraction: float) -> None:
# refDens could be zero, but cannot normalize to zero.
density = self.refDens or 1.0
- massDensities = numpy.array([self.massFrac[nuc] for nuc in nucsNames]) * density
- atomicMasses = numpy.array(
+ massDensities = np.array([self.massFrac[nuc] for nuc in nucsNames]) * density
+ atomicMasses = np.array(
[nuclideBases.byName[nuc].weight for nuc in nucsNames]
) # in AMU
molesPerCC = massDensities / atomicMasses # item-wise division
@@ -365,16 +394,16 @@ def adjustMassFrac(self, nuclideName: str, massFraction: float) -> None:
if isinstance(
nuclideBases.byName[nuclideName], nuclideBases.NaturalNuclideBase
) or nuclideBases.isMonoIsotopicElement(nuclideName):
- # if there are not any other nuclides, assume we are enriching an entire element
- # consequently, allIndicesUpdated is no longer the element's indices, but the materials indices
+ # If there are not any other nuclides, assume we are enriching an entire element.
+ # Consequently, allIndicesUpdated is no longer the element's indices, but the
+                # material's indices.
allIndicesUpdated = range(len(nucsNames))
else:
raise ValueError( # could be warning if problematic
- "Nuclide {} was to be enriched in material {}, but there were no other isotopes of "
- "that element. Could not assume the enrichment of the entire element as there were "
- "other possible isotopes that did not exist in this material.".format(
- nuclideName, self
- )
+ "Nuclide {} was to be enriched in material {}, but there were no other "
+ "isotopes of that element. Could not assume the enrichment of the entire "
+ "element as there were other possible isotopes that did not exist in this "
+ "material.".format(nuclideName, self)
)
if massFraction == 1.0:
@@ -388,8 +417,8 @@ def adjustMassFrac(self, nuclideName: str, massFraction: float) -> None:
onlyOneOtherFracToDetermine = len(allIndicesUpdated) == 2
if not onlyOneOtherFracToDetermine:
raise ValueError(
- "Material {} has too many masses set to zero. cannot enrich {} to {}. Current "
- "mass fractions: {}".format(
+ "Material {} has too many masses set to zero. cannot enrich {} to {}. "
+ "Current mass fractions: {}".format(
self, nuclideName, massFraction, self.massFrac
)
)
@@ -418,7 +447,7 @@ def adjustMassFrac(self, nuclideName: str, massFraction: float) -> None:
updatedDensity = updatedMassDensities.sum()
massFracs = updatedMassDensities / updatedDensity
- if not numpy.isclose(sum(massFracs), 1.0, atol=1e-10):
+ if not np.isclose(sum(massFracs), 1.0, atol=1e-10):
raise RuntimeError(
f"The mass fractions {massFracs} in {self} do not sum to 1.0."
)
@@ -437,7 +466,7 @@ def getTemperatureAtDensity(
        # 0 at temperature of targetDensity
densFunc = lambda temp: self.density(Tc=temp) - targetDensity
# is a numpy array if fsolve is called
- tAtTargetDensity = float(fsolve(densFunc, tempGuessInC))
+ tAtTargetDensity = float(fsolve(densFunc, tempGuessInC)[0])
return tAtTargetDensity
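The added [0] matters because scipy.optimize.fsolve always returns an ndarray, even for a scalar problem, and converting a size-1 array with float() is deprecated in recent NumPy. A sketch of the behavior being guarded against:

    import numpy as np
    from scipy.optimize import fsolve

    root = fsolve(lambda x: x**2 - 4.0, 1.0)  # ndarray of shape (1,), here array([2.])
    assert isinstance(root, np.ndarray)
    value = float(root[0])  # index first; float(root) warns or raises on newer NumPy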
@property
@@ -500,9 +529,8 @@ def density(self, Tk: float = None, Tc: float = None) -> float:
Notes
-----
- Since refDens is specified at the material-dep reference case, we don't
- need to specify the reference temperature. It is already consistent with linearExpansion
- Percent.
+        Since refDens is specified at the material-dependent reference case, we don't need to
+        specify the reference temperature. It is already consistent with linearExpansionPercent.
- p*(dp/p(T) + 1) =p*( p + dp(T) )/p = p + dp(T) = p(T)
- dp/p = (1-(1 + dL/L)**3)/(1 + dL/L)**3
"""
@@ -577,15 +605,15 @@ def getMassFrac(
Notes
-----
- self.massFrac are modified mass fractions that may not add up to 1.0
- (for instance, after a axial expansion, the modified mass fracs will sum to less than one.
- The alternative is to put a multiplier on the density. They're mathematically equivalent.
+        self.massFrac are modified mass fractions that may not add up to 1.0 (for instance, after
+        an axial expansion, the modified mass fracs will sum to less than one). The alternative is
+        to put a multiplier on the density. They're mathematically equivalent.
- This function returns the normalized mass fraction (they will add to 1.0) as long as
- the mass fracs are modified only by get and setMassFrac
+        This function returns normalized mass fractions (they will add to 1.0) as long as the
+        mass fracs are modified only by get and setMassFrac.
- This is a performance-critical method as it is called millions of times in a
- typical ARMI run.
+ This is a performance-critical method as it is called millions of times in a typical ARMI
+ run.
See Also
--------
@@ -626,7 +654,9 @@ def checkPropertyTempRange(self, label, val):
def checkTempRange(self, minT, maxT, val, label=""):
"""
- Checks if the given temperature (val) is between the minT and maxT temperature limits supplied.
+ Checks if the given temperature (val) is between the minT and maxT temperature limits
+ supplied.
+
Label identifies what material type or element is being evaluated in the check.
Parameters
@@ -644,7 +674,7 @@ def checkTempRange(self, minT, maxT, val, label=""):
msg = "Temperature {0} out of range ({1} to {2}) for {3} {4}".format(
val, minT, maxT, self.name, label
)
- if FAIL_ON_RANGE or numpy.isnan(val):
+ if FAIL_ON_RANGE or np.isnan(val):
runLog.error(msg)
raise ValueError
else:
@@ -683,13 +713,11 @@ def getNuclides(self):
Notes
-----
- This method is the only reason Materials still have self.parent.
- Essentially, we want to change that, but right now the logic for finding
- nuclides in the Reactor is recursive and considers Materials first.
- The bulk of the work in finally removing this method will come in
- downstream repos, where users have fully embraced this method and call
- it directly in many, many places.
- Please do not use this method, as it is being deprecated.
+ This method is the only reason Materials still have self.parent. Essentially, we want to
+ change that, but right now the logic for finding nuclides in the Reactor is recursive and
+ considers Materials first. The bulk of the work in finally removing this method will come in
+ downstream repos, where users have fully embraced this method and call it directly in many,
+ many places. Please do not use this method, as it is being deprecated.
"""
warnings.warn("Material.getNuclides is being deprecated.", DeprecationWarning)
return self.parent.getNuclides()
@@ -703,8 +731,9 @@ def getTempChangeForDensityChange(
deltaT = linearChange / linearExpansion
if not quiet:
runLog.info(
- f"The linear expansion for {self.getName()} at initial temperature of {Tc} C is {linearExpansion}.\n"
- f"A change in density of {(densityFrac - 1.0) * 100.0} percent at would require a change in temperature of {deltaT} C.",
+ f"The linear expansion for {self.getName()} at initial temperature of {Tc} C is "
+ f"{linearExpansion}.\nA change in density of {(densityFrac - 1.0) * 100.0} percent "
+ "at would require a change in temperature of {deltaT} C.",
single=True,
)
return deltaT
@@ -729,7 +758,9 @@ class Fluid(Material):
"""A material that fills its container. Could also be a gas."""
def getThermalExpansionDensityReduction(self, prevTempInC, newTempInC):
- """Return the factor required to update thermal expansion going from temperatureInC to temperatureInCNew."""
+ """Return the factor required to update thermal expansion going from one temperature (in
+        Celsius) to a new temperature.
+ """
rho0 = self.pseudoDensity(Tc=prevTempInC)
if not rho0:
return 1.0
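The remainder of this method falls outside the hunk; presumably the factor is a ratio of the two saturated densities. A hedged sketch under that assumption (densityReductionFactor is an illustrative helper, not the ARMI method itself):

    # Assumption: the factor is rho(new)/rho(old), the multiplier taking densities
    # from the old temperature to the new one.
    def densityReductionFactor(pseudoDensity, prevTempInC, newTempInC):
        rho0 = pseudoDensity(prevTempInC)
        if not rho0:
            return 1.0  # mirrors the guard above: no density info, no change
        return pseudoDensity(newTempInC) / rho0

    toyDensity = lambda Tc: 1.0 - 1e-4 * Tc  # toy density curve, g/cc
    factor = densityReductionFactor(toyDensity, 20.0, 100.0)  # < 1.0, density fell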
@@ -744,10 +775,10 @@ def linearExpansion(self, Tk=None, Tc=None):
:id: I_ARMI_MAT_FLUID
:implements: R_ARMI_MAT_FLUID
- ARMI does not model thermal expansion of fluids. The ``Fluid`` superclass
- therefore sets the thermal expansion coefficient to zero. All fluids
- subclassing the ``Fluid`` material will inherit this method which sets the
- linear expansion coefficient to zero at all temperatures.
+ ARMI does not model thermal expansion of fluids. The ``Fluid`` superclass therefore sets
+ the thermal expansion coefficient to zero. All fluids subclassing the ``Fluid``
+ material will inherit this method which sets the linear expansion coefficient to zero at
+ all temperatures.
"""
return 0.0
@@ -761,8 +792,8 @@ def getTempChangeForDensityChange(
deltaT = tAtPerturbedDensity - Tc
if not quiet:
runLog.info(
- "A change in density of {} percent in {} at an initial temperature of {} C would require "
- "a change in temperature of {} C.".format(
+ "A change in density of {} percent in {} at an initial temperature of {} C would "
+ "require a change in temperature of {} C.".format(
(densityFrac - 1.0) * 100.0, self.getName(), Tc, deltaT
),
single=True,
@@ -815,9 +846,8 @@ def linearExpansionPercent(self, Tk: float = None, Tc: float = None) -> float:
Notes
-----
- This only method only works for Simple Solid Materials which assumes
- the density function returns 'free expansion' density as a function
- temperature
+        This method only works for Simple Solid Materials, which assume the density function
+        returns 'free expansion' density as a function of temperature.
"""
density1 = self.density(Tk=self.refTempK)
density2 = self.density(Tk=Tk, Tc=Tc)
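The rest of this method (outside the hunk) inverts the cubic density relation to recover percent expansion; a sketch of that inversion under the 'free expansion' assumption stated above:

    # Invert rho_ref/rho(T) = (1 + dL/L)**3 for the percent linear expansion.
    def linearExpansionPercentFromDensities(density1, density2):
        # density1: density at the reference temperature
        # density2: free-expansion density at the temperature of interest
        return 100.0 * ((density1 / density2) ** (1.0 / 3.0) - 1.0)

    assert abs(linearExpansionPercentFromDensities(10.0, 10.0 / 1.002**3) - 0.2) < 1e-9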
@@ -864,13 +894,13 @@ def applyInputParams(
Notes
-----
- This is often overridden to insert customized material modification parameters
- but then this parent should always be called at the end in case users want to
- use this style of custom input.
+ This is often overridden to insert customized material modification parameters but then this
+ parent should always be called at the end in case users want to use this style of custom
+ input.
- This is only applied to materials considered fuel so we don't apply these
- kinds of parameters to coolants and structural material, which are often
- not parameterized with any kind of enrichment.
+ This is only applied to materials considered fuel so we don't apply these kinds of
+ parameters to coolants and structural material, which are often not parameterized with any
+ kind of enrichment.
"""
if class1_wt_frac:
if not 0 <= class1_wt_frac <= 1:
@@ -891,8 +921,8 @@ def applyInputParams(
)
if class1_custom_isotopics == class2_custom_isotopics:
runLog.warning(
- "The custom isotopics specified for the class1/class2 materials"
- f" are both '{class1_custom_isotopics}'. You are not actually blending anything!"
+ "The custom isotopics specified for the class1/class2 materials are both "
+ f"'{class1_custom_isotopics}'. You are not actually blending anything!"
)
self.class1_wt_frac = class1_wt_frac
@@ -907,8 +937,8 @@ def _applyIsotopicsMixFromCustomIsotopicsInput(self, customIsotopics):
Only adjust heavy metal.
- This may also be needed for building charge assemblies during reprocessing, but
- will take input from the SFP rather than from the input external feeds.
+ This may also be needed for building charge assemblies during reprocessing, but will take
+ input from the SFP rather than from the input external feeds.
"""
class1Isotopics = customIsotopics[self.class1_custom_isotopics]
class2Isotopics = customIsotopics[self.class2_custom_isotopics]
diff --git a/armi/materials/mox.py b/armi/materials/mox.py
index f4fdfc191..b2ab41700 100644
--- a/armi/materials/mox.py
+++ b/armi/materials/mox.py
@@ -17,8 +17,9 @@
A definitive source for these properties is [#ornltm20002]_.
-.. [#ornltm20002] Thermophysical Properties of MOX and UO2 Fuels Including the Effects of Irradiation. S.G. Popov, et.al.
- Oak Ridge National Laboratory. ORNL/TM-2000/351 https://rsicc.ornl.gov/fmdp/tm2000-351.pdf
+.. [#ornltm20002] Thermophysical Properties of MOX and UO2 Fuels Including the Effects of
+    Irradiation. S.G. Popov, et al. Oak Ridge National Laboratory.
+ ORNL/TM-2000/351 https://rsicc.ornl.gov/fmdp/tm2000-351.pdf
"""
from armi import runLog
@@ -31,8 +32,8 @@ class MOX(UraniumOxide):
"""
MOX fuel.
- Some parameters (density, thermal conductivity, etc) are inherited from UraniumOxide.
- These parameters are sufficiently equivalent to pure UO2 in the literature to leave them unchanged.
+ Some parameters (density, thermal conductivity, etc) are inherited from UraniumOxide. These
+ parameters are sufficiently equivalent to pure UO2 in the literature to leave them unchanged.
Specific MOX mixtures may be defined in blueprints under custom isotopics.
"""
diff --git a/armi/materials/tZM.py b/armi/materials/tZM.py
index 84da84581..98a9542f7 100644
--- a/armi/materials/tZM.py
+++ b/armi/materials/tZM.py
@@ -22,9 +22,9 @@
class TZM(Material):
propertyValidTemperature = {"linear expansion percent": ((21.11, 1382.22), "C")}
references = {
- "linear expansion percent": "Report on the Mechanical and Thermal Properties of Tungsten and TZM Sheet Produced \
- in the Refractory Metal Sheet Rolling Program, Part 1 to Bureau of Naval Weapons Contract No. N600(19)-59530, \
- Southern Research Institute"
+ "linear expansion percent": "Report on the Mechanical and Thermal Properties of Tungsten \
+ and TZM Sheet Produced in the Refractory Metal Sheet Rolling Program, Part 1 to Bureau \
+ of Naval Weapons Contract No. N600(19)-59530, Southern Research Institute"
}
temperatureC = [
@@ -66,8 +66,8 @@ def setDefaultMassFracs(self):
self.setMassFrac("MO", 0.996711222)
def linearExpansionPercent(self, Tk=None, Tc=None):
- r"""
- return linear expansion in %dL/L from interpolation of tabular data.
+ """
+ Return linear expansion in %dL/L from interpolation of tabular data.
This function is used to expand a material from its reference temperature (21C)
to a particular hot temperature.
@@ -79,9 +79,9 @@ def linearExpansionPercent(self, Tk=None, Tc=None):
Tc : float
temperature in C
- Source: Report on the Mechanical and Thermal Properties of Tungsten and TZM Sheet Produced \
- in the Refractory Metal Sheet Rolling Program, Part 1 to Bureau of Naval Weapons Contract No. N600(19)-59530, 1966 \
- Southern Research Institute.
+ Source: Report on the Mechanical and Thermal Properties of Tungsten and TZM Sheet Produced
+ in the Refractory Metal Sheet Rolling Program, Part 1 to Bureau of Naval Weapons
+ Contract No. N600(19)-59530, 1966 Southern Research Institute.
See Table viii-b, Appendix B, page 181.
"""
diff --git a/armi/materials/tests/test_materials.py b/armi/materials/tests/test_materials.py
index aa670d947..1b27df200 100644
--- a/armi/materials/tests/test_materials.py
+++ b/armi/materials/tests/test_materials.py
@@ -46,7 +46,7 @@ def test_isPicklable(self):
)
def test_density(self):
- """Test that all materials produce a zero density from density."""
+ """Test that all materials produce a non-zero density from density."""
self.assertNotEqual(self.mat.density(500), 0)
def test_TD(self):
@@ -95,6 +95,10 @@ def test_pseudoDensityKgM3(self):
densKgM3 = self.mat.pseudoDensityKgM3(500)
self.assertEqual(dens * 1000.0, densKgM3)
+ def test_wrappedDensity(self):
+ """Test that the density decorator is applied."""
+ self.assertTrue(hasattr(self.mat.density, "__wrapped__"))
+
class MaterialConstructionTests(unittest.TestCase):
def test_material_initialization(self):
@@ -1354,8 +1358,9 @@ def test_01_linearExpansionPercent(self):
for Tc, val in zip(TcList, refList):
cur = self.mat.linearExpansionPercent(Tc=Tc)
ref = val
- errorMsg = "\n\nIncorrect Inconel 600 linearExpansionPercent(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format(
- cur, ref
+ errorMsg = (
+ "\n\nIncorrect Inconel 600 linearExpansionPercent(Tk=None,Tc=None)\n"
+ "Received:{}\nExpected:{}\n".format(cur, ref)
)
self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)
@@ -1375,8 +1380,9 @@ def test_02_linearExpansion(self):
for Tc, val in zip(TcList, refList):
cur = self.mat.linearExpansion(Tc=Tc)
ref = val
- errorMsg = "\n\nIncorrect Inconel 600 linearExpansion(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format(
- cur, ref
+ errorMsg = (
+ "\n\nIncorrect Inconel 600 linearExpansion(Tk=None,Tc=None)\nReceived:"
+ "{}\nExpected:{}\n".format(cur, ref)
)
self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)
@@ -1494,8 +1500,9 @@ def test_01_linearExpansionPercent(self):
for Tc, val in zip(TcList, refList):
cur = self.mat.linearExpansionPercent(Tc=Tc)
ref = val
- errorMsg = "\n\nIncorrect Inconel 625 linearExpansionPercent(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format(
- cur, ref
+ errorMsg = (
+ "\n\nIncorrect Inconel 625 linearExpansionPercent(Tk=None,Tc=None)\n"
+ "Received:{}\nExpected:{}\n".format(cur, ref)
)
self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)
@@ -1632,8 +1639,9 @@ def test_01_linearExpansionPercent(self):
for Tc, val in zip(TcList, refList):
cur = self.mat.linearExpansionPercent(Tc=Tc)
ref = val
- errorMsg = "\n\nIncorrect Inconel X750 linearExpansionPercent(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format(
- cur, ref
+ errorMsg = (
+ "\n\nIncorrect Inconel X750 linearExpansionPercent(Tk=None,Tc=None)\n"
+ "Received:{}\nExpected:{}\n".format(cur, ref)
)
self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)
diff --git a/armi/materials/water.py b/armi/materials/water.py
index 8b44a23bd..0cd35b66e 100644
--- a/armi/materials/water.py
+++ b/armi/materials/water.py
@@ -21,6 +21,11 @@
from armi.utils import units
from armi.utils.units import getTk
+_REF_SR1_86 = (
+ "IAPWS SR1-86 Revised Supplementary Release on Saturation Properties of Ordinary Water and "
+ "Steam"
+)
+
class Water(Fluid):
"""
@@ -41,13 +46,13 @@ class Water(Fluid):
thermalScatteringLaws = (tsl.byNbAndCompound[nb.byName["H"], tsl.H2O],)
references = {
- "vapor pressure": "IAPWS SR1-86 Revised Supplementary Release on Saturation Properties of Ordinary Water and Steam",
- "enthalpy (saturated water)": "IAPWS SR1-86 Revised Supplementary Release on Saturation Properties of Ordinary Water and Steam",
- "enthalpy (saturated steam)": "IAPWS SR1-86 Revised Supplementary Release on Saturation Properties of Ordinary Water and Steam",
- "entropy (saturated water)": "IAPWS SR1-86 Revised Supplementary Release on Saturation Properties of Ordinary Water and Steam",
- "entropy (saturated steam)": "IAPWS SR1-86 Revised Supplementary Release on Saturation Properties of Ordinary Water and Steam",
- "density (saturated water)": "IAPWS SR1-86 Revised Supplementary Release on Saturation Properties of Ordinary Water and Steam",
- "density (saturated steam)": "IAPWS SR1-86 Revised Supplementary Release on Saturation Properties of Ordinary Water and Steam",
+ "vapor pressure": _REF_SR1_86,
+ "enthalpy (saturated water)": _REF_SR1_86,
+ "enthalpy (saturated steam)": _REF_SR1_86,
+ "entropy (saturated water)": _REF_SR1_86,
+ "entropy (saturated steam)": _REF_SR1_86,
+ "density (saturated water)": _REF_SR1_86,
+ "density (saturated steam)": _REF_SR1_86,
}
TEMPERATURE_CRITICAL_K = 647.096
diff --git a/armi/mpiActions.py b/armi/mpiActions.py
index ce9b32237..96b3495b0 100644
--- a/armi/mpiActions.py
+++ b/armi/mpiActions.py
@@ -59,7 +59,6 @@
import timeit
from six.moves import cPickle
-import tabulate
from armi import context
from armi import interfaces
@@ -69,6 +68,7 @@
from armi.reactor import reactors
from armi.reactor.parameters import parameterDefinitions
from armi.utils import iterables
+from armi.utils import tabulate
class MpiAction:
@@ -138,7 +138,7 @@ def _mpiOperationHelper(self, obj, mpiFunction):
self.o = self.r = self.cs = None
try:
return mpiFunction(obj, root=0)
- except (cPickle.PicklingError) as error:
+ except cPickle.PicklingError as error:
runLog.error("Failed to {} {}.".format(mpiFunction.__name__, obj))
runLog.error(error)
raise
@@ -553,8 +553,8 @@ def invokeHook(self):
self.r.core.regenAssemblyLists()
# check to make sure that everything has been properly reattached
- if self.r.core.getFirstBlock().r is not self.r:
- raise RuntimeError("Block.r is not self.r. Reattach the blocks!")
+ if self.r.core.getFirstBlock().core.r is not self.r:
+ raise RuntimeError("Block.core.r is not self.r. Reattach the blocks!")
beforeCollection = timeit.default_timer()
diff --git a/armi/nucDirectory/elements.py b/armi/nucDirectory/elements.py
index ac201687c..35b33794c 100644
--- a/armi/nucDirectory/elements.py
+++ b/armi/nucDirectory/elements.py
@@ -71,7 +71,7 @@
Retrieve elements that are classified as actinides:
-
+
>>> elements.getElementsByChemicalGroup(elements.ChemicalGroup.ACTINIDE)
    [<Element AC (Z=89), Actinium>,
     <Element TH (Z=90), Thorium>,
@@ -96,8 +96,8 @@
    :ref:`nuclide bases summary table <nuclide-bases-table>`.
.. exec::
- from tabulate import tabulate
from armi.nucDirectory import elements
+ from armi.utils.tabulate import tabulate
from dochelpers import createTable
attributes = ['z',
@@ -122,10 +122,11 @@ def getAttributes(element):
]
sortedElements = sorted(elements.byZ.values())
- return createTable(tabulate(tabular_data=[getAttributes(elem) for elem in sortedElements],
+ return createTable(tabulate(data=[getAttributes(elem) for elem in sortedElements],
headers=attributes,
- tablefmt='rst'),
- caption='List of elements')
+ tableFmt='rst'),
+ caption='List of elements',
+ label='nuclide-bases-table')
"""
import os
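Judging from the updated call above, the vendored armi.utils.tabulate renames the upstream tabular_data/tablefmt keywords to data/tableFmt. A minimal usage sketch under that assumption (check the module itself for the full signature):

    from armi.utils.tabulate import tabulate

    print(tabulate(data=[["H", 1], ["HE", 2]], headers=["symbol", "z"], tableFmt="rst"))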
diff --git a/armi/nucDirectory/nuclideBases.py b/armi/nucDirectory/nuclideBases.py
index 0d92103b4..b4e9bc487 100644
--- a/armi/nucDirectory/nuclideBases.py
+++ b/armi/nucDirectory/nuclideBases.py
@@ -96,7 +96,7 @@ class which is used to organize and store metadata about each nuclide. The
import os
from ruamel.yaml import YAML
-import numpy
+import numpy as np
from armi import context
from armi import runLog
@@ -317,8 +317,9 @@ def __init__(
"""
Create an instance of an INuclide.
- .. warning::
- Do not call this constructor directly; use the factory instead.
+ Warning
+ -------
+ Do not call this constructor directly; use the factory instead.
"""
if element not in elements.byName.values():
raise ValueError(
@@ -326,7 +327,8 @@ def __init__(
)
if state < 0:
raise ValueError(
- f"Error in initializing nuclide {name}. An invalid state {state} is provided. The state must be a positive integer."
+ f"Error in initializing nuclide {name}. An invalid state {state} is provided. The "
+ "state must be a positive integer."
)
if halflife < 0.0:
raise ValueError(
@@ -726,7 +728,7 @@ def __init__(self, name, element):
[nn.weight * nn.abundance for nn in element.getNaturalIsotopics()]
),
abundance=0.0,
- halflife=numpy.inf,
+ halflife=np.inf,
name=name,
label=name,
)
@@ -825,7 +827,7 @@ def __init__(self, name, weight):
state=0,
weight=weight,
abundance=0.0,
- halflife=numpy.inf,
+ halflife=np.inf,
name=name,
label="DMP" + name[4],
)
@@ -891,7 +893,7 @@ def __init__(self, name, weight):
state=0,
weight=weight,
abundance=0.0,
- halflife=numpy.inf,
+ halflife=np.inf,
name=name,
label=name[1:],
)
@@ -1219,7 +1221,7 @@ def addNuclideBases():
abun = float(lineData[6])
halflife = lineData[7]
if halflife == "inf":
- halflife = numpy.inf
+ halflife = np.inf
else:
halflife = float(halflife)
nuSF = float(lineData[8])
diff --git a/armi/nucDirectory/tests/test_elements.py b/armi/nucDirectory/tests/test_elements.py
index e508c8596..7236c615f 100644
--- a/armi/nucDirectory/tests/test_elements.py
+++ b/armi/nucDirectory/tests/test_elements.py
@@ -23,16 +23,10 @@
class TestElement(unittest.TestCase):
def test_elements_elementBulkProperties(self):
- numElements = 120
- self.assertEqual(
- sum(range(1, numElements + 1)), sum([ee.z for ee in elements.byZ.values()])
- )
+ numElements = len(elements.byZ)
self.assertEqual(numElements, len(elements.byZ.values()))
self.assertEqual(numElements, len(elements.byName))
self.assertEqual(numElements, len(elements.bySymbol))
- self.assertEqual(numElements, len(elements.byZ))
- for ee in elements.byZ.values():
- self.assertIsNotNone(ee.standardWeight)
def test_element_elementByNameReturnsElement(self):
"""Get elements by name.
@@ -80,7 +74,6 @@ def test_element_addedElementAppearsInElementList(self):
# re-initialize the elements
with mockRunLogs.BufferLog():
nuclideBases.destroyGlobalNuclides()
- elements.factory()
nuclideBases.factory()
# Ensure that the burn chain data is initialized after clearing
# out the nuclide data and reinitializing it.
diff --git a/armi/nucDirectory/tests/test_nucDirectory.py b/armi/nucDirectory/tests/test_nucDirectory.py
index f0e180b6e..107e91119 100644
--- a/armi/nucDirectory/tests/test_nucDirectory.py
+++ b/armi/nucDirectory/tests/test_nucDirectory.py
@@ -15,15 +15,10 @@
"""Tests nuclide directory."""
import unittest
-from armi.nucDirectory import nucDir, elements, nuclideBases
-from armi.tests import mockRunLogs
+from armi.nucDirectory import nucDir, nuclideBases
class TestNucDirectory(unittest.TestCase):
- def setUp(self):
- with mockRunLogs.BufferLog():
- elements.factory()
-
def test_nucDir_getNameForOldDashedNames(self):
oldNames = [
"U-232",
diff --git a/armi/nucDirectory/tests/test_nuclideBases.py b/armi/nucDirectory/tests/test_nuclideBases.py
index 0d0773852..01ca11f08 100644
--- a/armi/nucDirectory/tests/test_nuclideBases.py
+++ b/armi/nucDirectory/tests/test_nuclideBases.py
@@ -21,7 +21,7 @@
from ruamel.yaml import YAML
from armi.context import RES
-from armi.nucDirectory import nuclideBases, elements
+from armi.nucDirectory import nuclideBases
from armi.nucDirectory.tests import NUCDIRECTORY_TESTS_DEFAULT_DIR_PATH
from armi.utils.units import SECONDS_PER_HOUR, AVOGADROS_NUMBER, CURIE_PER_BECQUEREL
@@ -31,7 +31,6 @@ class TestNuclide(unittest.TestCase):
def setUpClass(cls):
cls.nucDirectoryTestsPath = NUCDIRECTORY_TESTS_DEFAULT_DIR_PATH
nuclideBases.destroyGlobalNuclides()
- elements.factory()
nuclideBases.factory()
# Ensure that the burn chain data is initialized before running these tests.
nuclideBases.burnChainImposed = False
@@ -128,13 +127,7 @@ def test_NaturalNuclide_atomicWeightIsAverageOfNaturallyOccuringIsotopes(self):
atomicMass = 0.0
for natIso in natNuk.getNaturalIsotopics():
atomicMass += natIso.abundance * natIso.weight
- self.assertEqual(
- atomicMass,
- natNuk.weight,
- "{} weight is {}, expected {}".format(
- natNuk, natNuk.weight, atomicMass
- ),
- )
+ self.assertAlmostEqual(atomicMass, natNuk.weight, delta=0.000001)
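assertAlmostEqual is the right tool here because the abundance-weighted sum accumulates floating-point rounding and rarely matches a stored weight bit-for-bit. A sketch of the failure mode (the numbers are hypothetical, not library values):

    weights = [1.007825, 2.014102]
    abundances = [0.99985, 0.00015]
    total = sum(a * w for a, w in zip(abundances, weights))
    stored = 1.0079759415  # hypothetical stored natural-element weight
    assert total != stored              # exact equality is brittle
    assert abs(total - stored) < 1e-6   # what assertAlmostEqual(delta=...) checks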
def test_nucBases_labelAndNameCollsionsAreForSameNuclide(self):
"""The name and labels for correct for nuclides.
diff --git a/armi/nuclearDataIO/cccc/__init__.py b/armi/nuclearDataIO/cccc/__init__.py
index 9363cedb8..5150b6cf4 100644
--- a/armi/nuclearDataIO/cccc/__init__.py
+++ b/armi/nuclearDataIO/cccc/__init__.py
@@ -58,7 +58,7 @@
.. [CCCC-IV] R. Douglas O'Dell, "Standard Interface Files and Procedures for Reactor Physics
Codes, Version IV," LA-6941-MS, Los Alamos National Laboratory (September 1977).
-    Web. doi:10.2172/5369298. (`OSTI <https://www.osti.gov/biblio/5369298>`_)
+    Web. doi:10.2172/5369298. (`OSTI <https://www.osti.gov/biblio/5369298>`__)
Using the system
----------------
@@ -115,4 +115,4 @@
This was originally inspired by Prof. James Paul Holloway's alpha
release of ccccutils written in c++ from 2001.
"""
-from .cccc import * # noqa: unused-import
+from armi.nuclearDataIO.cccc.cccc import * # noqa: F403
diff --git a/armi/nuclearDataIO/cccc/cccc.py b/armi/nuclearDataIO/cccc/cccc.py
index dd3133481..8d2756a20 100644
--- a/armi/nuclearDataIO/cccc/cccc.py
+++ b/armi/nuclearDataIO/cccc/cccc.py
@@ -13,10 +13,10 @@
# limitations under the License.
"""
-Defines containers for the reading and writing standard interface files
-for reactor physics codes.
+Defines containers for the reading and writing standard interface files for reactor physics codes.
-.. impl:: Generic tool for reading and writing Committee on Computer Code Coordination (CCCC) format files for reactor physics codes
+.. impl:: Generic tool for reading and writing Committee on Computer Code Coordination (CCCC) format
+ files for reactor physics codes
:id: I_ARMI_NUCDATA
:implements: R_ARMI_NUCDATA_ISOTXS,
R_ARMI_NUCDATA_GAMISO,
@@ -25,24 +25,21 @@
R_ARMI_NUCDATA_PMATRX,
R_ARMI_NUCDATA_DLAYXS
- This module provides a number of base classes that implement general
- capabilities for binary and ASCII file I/O. The :py:class:`IORecord` serves
- as an abstract base class that instantiates a number of methods that the
- binary and ASCII children classes are meant to implement. These methods,
- prefixed with ``rw``, are meant to convert literal data types, e.g. float or
- int, to either binary or ASCII. This base class does its own conversion for
- container data types, e.g. list or matrix, relying on the child
- implementation of the literal types that the container possesses. The binary
- conversion is implemented in :py:class:`BinaryRecordReader` and
+ This module provides a number of base classes that implement general capabilities for binary and
+ ASCII file I/O. The :py:class:`IORecord` serves as an abstract base class that instantiates a
+ number of methods that the binary and ASCII children classes are meant to implement. These
+ methods, prefixed with ``rw``, are meant to convert literal data types, e.g. float or int, to
+ either binary or ASCII. This base class does its own conversion for container data types, e.g.
+ list or matrix, relying on the child implementation of the literal types that the container
+ possesses. The binary conversion is implemented in :py:class:`BinaryRecordReader` and
:py:class:`BinaryRecordWriter`. The ASCII conversion is implemented in
:py:class:`AsciiRecordReader` and :py:class:`AsciiRecordWriter`.
- These :py:class:`IORecord` classes are used within :py:class:`Stream` objects
- for the data conversion. :py:class:`Stream` is a context manager that opens
- a file for reading or writing on the ``__enter__`` and closes that file upon
- ``__exit__``. :py:class:`Stream` is an abstract base class that is
- subclassed for each CCCC file. It is subclassed directly for the CCCC files
- that contain cross-section data:
+ These :py:class:`IORecord` classes are used within :py:class:`Stream` objects for the data
+ conversion. :py:class:`Stream` is a context manager that opens a file for reading or writing on
+ the ``__enter__`` and closes that file upon ``__exit__``. :py:class:`Stream` is an abstract base
+ class that is subclassed for each CCCC file. It is subclassed directly for the CCCC files that
+ contain cross-section data:
* :py:class:`ISOTXS `
* :py:mod:`GAMISO `
@@ -50,47 +47,43 @@
* :py:class:`DLAYXS `
* :py:mod:`COMPXS `
- For the CCCC file types that are outputs from a flux solver such as DIF3D
- (e.g., GEODST, DIF3D, NHFLUX) the streams are subclassed from
- :py:class:`StreamWithDataContainer`, which is a special abstract subclass of
- :py:class:`Stream` that implements a common pattern used for these file
- types. In a :py:class:`StreamWithDataContainer`, the data is directly read
- to or written from a specialized data container.
+ For the CCCC file types that are outputs from a flux solver such as DIF3D (e.g., GEODST, DIF3D,
+ NHFLUX) the streams are subclassed from :py:class:`StreamWithDataContainer`, which is a special
+ abstract subclass of :py:class:`Stream` that implements a common pattern used for these file
+ types. In a :py:class:`StreamWithDataContainer`, the data is directly read to or written from a
+ specialized data container.
- The data container structure for each type of CCCC file is implemented in
- the module for that file, as a subclass of :py:class:`DataContainer`. The
- subclasses for each CCCC file type define standard attribute names for the
- data that will be read from or written to the CCCC file. CCCC file types
- that follow this pattern include:
+ The data container structure for each type of CCCC file is implemented in the module for that
+ file, as a subclass of :py:class:`DataContainer`. The subclasses for each CCCC file type define
+ standard attribute names for the data that will be read from or written to the CCCC file. CCCC
+ file types that follow this pattern include:
* :py:class:`GEODST `
* :py:class:`DIF3D `
- * :py:class:`NHFLUX `
- (and multiple sub-classes thereof)
+ * :py:class:`NHFLUX ` (and multiple sub-classes)
* :py:class:`LABELS `
* :py:class:`PWDINT `
* :py:class:`RTFLUX `
* :py:class:`RZFLUX `
* :py:class:`RTFLUX `
- The logic to parse or write each specific file format is contained within
- the :py:meth:`Stream.readWrite` implementations of the respective
- subclasses.
+ The logic to parse or write each specific file format is contained within the
+ :py:meth:`Stream.readWrite` implementations of the respective subclasses.
"""
import io
import itertools
-import struct
import os
+import struct
from copy import deepcopy
from typing import List
-import numpy
+import numpy as np
from armi import runLog
from armi.nuclearDataIO import nuclearFileMetadata
IMPLICIT_INT = "IJKLMN"
-"""Letters that trigger implicit integer types in old FORTRAN 77 codes"""
+"""Letters that trigger implicit integer types in old FORTRAN 77 codes."""
class IORecord:
@@ -180,7 +173,6 @@ def rwInt(self, val):
The method has a seemingly odd signature, because it is used for both reading and writing.
When writing, the :code:`val` should have value, but when the record is being read,
:code:`val` can be :code:`None` or anything else; it is ignored.
-
"""
raise NotImplementedError()
@@ -197,7 +189,6 @@ def rwFloat(self, val):
The method has a seemingly odd signature, because it is used for both reading and writing.
When writing, the :code:`val` should have value, but when the record is being read,
:code:`val` can be :code:`None` or anything else; it is ignored.
-
"""
raise NotImplementedError()
@@ -209,7 +200,6 @@ def rwDouble(self, val):
The method has a seemingly odd signature, because it is used for both reading and writing.
When writing, the :code:`val` should have value, but when the record is being read,
:code:`val` can be :code:`None` or anything else; it is ignored.
-
"""
raise NotImplementedError()
@@ -221,7 +211,6 @@ def rwString(self, val, length):
The method has a seemingly odd signature, because it is used for both reading and writing.
When writing, the :code:`val` should have value, but when the record is being read,
:code:`val` can be :code:`None` or anything else; it is ignored.
-
"""
raise NotImplementedError()
@@ -246,7 +235,7 @@ def rwList(self, contents, containedType, length, strLength=0):
"string": lambda val: self.rwString(val, strLength),
"double": self.rwDouble,
}
- action = actions.get(containedType, None)
+ action = actions.get(containedType)
if action is None:
raise Exception(
'Cannot pack or unpack the type "{}".'.format(containedType)
@@ -254,7 +243,7 @@ def rwList(self, contents, containedType, length, strLength=0):
# this little trick will make this work for both reading and writing, yay!
if contents is None or len(contents) == 0:
contents = [None for _ in range(length)]
- return numpy.array([action(contents[ii]) for ii in range(length)])
+ return np.array([action(contents[ii]) for ii in range(length)])
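The "little trick" works because every rw* method returns the value it read (when reading) or the value it was handed (when writing), so mapping ``action`` over a list of Nones performs a read, and over real contents performs a write. A standalone sketch of that duality (Toy* names are illustrative):

    import numpy as np

    class ToyWriter:
        def __init__(self):
            self.stream = []
        def rwInt(self, val):
            self.stream.append(val)  # writing: record the value...
            return val               # ...and hand it back unchanged

    class ToyReader:
        def __init__(self, stream):
            self.stream = iter(stream)
        def rwInt(self, val):        # val is ignored; it is None on read
            return next(self.stream)

    def rwList(record, contents, length):
        if contents is None:
            contents = [None] * length  # reading: placeholders
        return np.array([record.rwInt(contents[i]) for i in range(length)])

    w = ToyWriter()
    rwList(w, [1, 2, 3], 3)  # writes 1, 2, 3
    assert rwList(ToyReader(w.stream), None, 3).tolist() == [1, 2, 3]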
def rwMatrix(self, contents, *shape):
"""A method for reading and writing a matrix of floating point values.
@@ -300,20 +289,20 @@ def _rwMatrix(contents, func, *shape):
Notes
-----
This can be important for performance when reading large matrices (e.g. scatter
- matrices). It may be worth investigating ``numpy.frombuffer`` on read and
+ matrices). It may be worth investigating ``np.frombuffer`` on read and
something similar on write.
With shape, the first shape argument should be the outermost loop because
these are stored in column major order (the FORTRAN way).
- Note that numpy.ndarrays can be built with ``order="F"`` to have column-major ordering.
+ Note that np.ndarrays can be built with ``order="F"`` to have column-major ordering.
So if you have ``((MR(I,J),I=1,NCINTI),J=1,NCINTJ)`` you would pass in
the shape as (NCINTJ, NCINTI).
"""
fortranShape = list(reversed(shape))
if contents is None or contents.size == 0:
- contents = numpy.empty(fortranShape)
+ contents = np.empty(fortranShape)
for index in itertools.product(*[range(ii) for ii in shape]):
fortranIndex = tuple(reversed(index))
contents[fortranIndex] = func(contents[fortranIndex])
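A compact illustration of the column-major note above: the shape passed in is reversed so the outer Python loop runs over the last FORTRAN index, leaving the first FORTRAN index varying fastest (a sketch, not ARMI code):

    import itertools
    import numpy as np

    NCINTI, NCINTJ = 3, 2         # FORTRAN order: ((MR(I,J), I=1,NCINTI), J=1,NCINTJ)
    shape = (NCINTJ, NCINTI)      # caller passes the outermost loop index first
    contents = np.empty(list(reversed(shape)))  # indexed contents[i, j]

    values = iter(range(6))       # stand-in for sequential record reads
    for index in itertools.product(*[range(n) for n in shape]):  # (j, i) pairs
        fortranIndex = tuple(reversed(index))                    # (i, j)
        contents[fortranIndex] = next(values)

    assert contents[:, 0].tolist() == [0.0, 1.0, 2.0]  # I varied fastest, as in FORTRAN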
@@ -343,7 +332,6 @@ class BinaryRecordReader(IORecord):
This class reads a single CCCC record in binary format. A CCCC record consists of a leading and
ending integer indicating how many bytes the record is. The data contained within the record may
be integer, float, double, or string.
-
"""
def open(self):
@@ -407,7 +395,7 @@ def rwString(self, val, length):
class BinaryRecordWriter(IORecord):
- r"""
+ """
Reads a single CCCC record in binary format.
Reads binary information sequentially.
@@ -567,7 +555,9 @@ class Stream:
"""
An abstract CCCC IO stream.
- .. warning:: This is more of a stream Parser/Serializer than an actual stream.
+ Warning
+ -------
+ This is more of a stream Parser/Serializer than an actual stream.
Notes
-----
@@ -674,7 +664,9 @@ class StreamWithDataContainer(Stream):
This is a relatively common pattern so some of the boilerplate
is handled here.
- .. warning:: This is more of a stream Parser/Serializer than an actual stream.
+ Warning
+ -------
+ This is more of a stream Parser/Serializer than an actual stream.
Notes
-----
diff --git a/armi/nuclearDataIO/cccc/compxs.py b/armi/nuclearDataIO/cccc/compxs.py
index 6256347eb..99bf03114 100644
--- a/armi/nuclearDataIO/cccc/compxs.py
+++ b/armi/nuclearDataIO/cccc/compxs.py
@@ -72,19 +72,20 @@
values are the first, second, and third dimension directional diffusion coefficient
additive terms, respectively.
"""
-from scipy.sparse import csc_matrix
from traceback import format_exc
-import numpy
+
+from scipy.sparse import csc_matrix
+import numpy as np
from armi import runLog
from armi.nuclearDataIO import cccc
-from armi.utils.properties import unlockImmutableProperties, lockImmutableProperties
from armi.nuclearDataIO.xsCollections import XSCollection
from armi.nuclearDataIO.nuclearFileMetadata import (
RegionXSMetadata,
COMPXS_POWER_CONVERSION_FACTORS,
REGIONXS_POWER_CONVERT_DIRECTIONAL_DIFF,
)
+from armi.utils.properties import unlockImmutableProperties, lockImmutableProperties
def _getRegionIO():
@@ -238,7 +239,7 @@ def readWrite(self):
regionIO = _getRegionIO()(region, self, self._lib)
regionIO.rwRegionData()
self._rw5DRecord()
- except: # noqa: bare-except
+ except Exception:
raise OSError(
"Failed to {} {} \n\n\n{}".format(
"read" if self._isReading else "write", self, format_exc()
@@ -451,9 +452,9 @@ def addColumnData(self, dataj, indicesj):
self.indptr.append(len(dataj) + self.indptr[-1])
def makeSparse(self, sparseFunc=csc_matrix):
- self.data = numpy.array(self.data, dtype="d")
- self.indices = numpy.array(self.indices, dtype="d")
- self.indptr = numpy.array(self.indptr, dtype="d")
+ self.data = np.array(self.data, dtype="d")
+ self.indices = np.array(self.indices, dtype="d")
+ self.indptr = np.array(self.indptr, dtype="d")
return sparseFunc((self.data, self.indices, self.indptr), shape=self.shape)
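For reference, csc_matrix accepts exactly the (data, indices, indptr) triple that addColumnData accumulates column by column. A minimal 2x2 sketch:

    import numpy as np
    from scipy.sparse import csc_matrix

    data = np.array([1.0, 2.0])   # stored values
    indices = np.array([0, 1])    # row index of each stored value
    indptr = np.array([0, 1, 2])  # data[indptr[j]:indptr[j+1]] belongs to column j
    m = csc_matrix((data, indices, indptr), shape=(2, 2))
    assert m.toarray().tolist() == [[1.0, 0.0], [0.0, 2.0]]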
@@ -570,14 +571,14 @@ def allocateXS(self, numGroups):
:py:meth:`makeScatteringMatrices`
"""
for xs in self._primaryXS:
- self.macros[xs] = numpy.zeros(numGroups)
+ self.macros[xs] = np.zeros(numGroups)
self.macros.totalScatter = _CompxsScatterMatrix((numGroups, numGroups))
if self.metadata["chiFlag"]:
- self.macros.fission = numpy.zeros(numGroups)
- self.macros.nuSigF = numpy.zeros(numGroups)
- self.macros.chi = numpy.zeros((numGroups, self.metadata["chiFlag"]))
+ self.macros.fission = np.zeros(numGroups)
+ self.macros.nuSigF = np.zeros(numGroups)
+ self.macros.chi = np.zeros((numGroups, self.metadata["chiFlag"]))
if self._getFileMetadata()["maxScatteringOrder"]:
for scatterOrder in range(
@@ -589,7 +590,7 @@ def allocateXS(self, numGroups):
for datum in REGIONXS_POWER_CONVERT_DIRECTIONAL_DIFF:
self.metadata[datum] = (
- numpy.zeros(numGroups) if "Additive" in datum else numpy.ones(numGroups)
+ np.zeros(numGroups) if "Additive" in datum else np.ones(numGroups)
).tolist()
def makeScatteringMatrices(self):
diff --git a/armi/nuclearDataIO/cccc/dlayxs.py b/armi/nuclearDataIO/cccc/dlayxs.py
index f62cc3750..4364b92b1 100644
--- a/armi/nuclearDataIO/cccc/dlayxs.py
+++ b/armi/nuclearDataIO/cccc/dlayxs.py
@@ -13,17 +13,17 @@
# limitations under the License.
"""
-Module to read DLAYXS files, which contain delayed neutron precursor data, including decay constants and emission
-spectra.
+Module to read DLAYXS files, which contain delayed neutron precursor data, including decay constants
+and emission spectra.
-Similar to ISOTXS files, DLAYXS files are often created by a lattice physics code such as MC2 and used as input
-to a global flux solver such as DIF3D.
+Similar to ISOTXS files, DLAYXS files are often created by a lattice physics code such as MC2 and
+used as input to a global flux solver such as DIF3D.
This module implements reading and writing of the DLAYXS, consistent with [CCCC-IV]_.
"""
import collections
-import numpy
+import numpy as np
from armi import runLog
from armi.nucDirectory import nuclideBases
@@ -38,18 +38,20 @@ class DelayedNeutronData:
"""
Container of information about delayed neutron precursors.
- This info should be enough to perform point kinetics problems and to compute the delayed neutron fraction.
+ This info should be enough to perform point kinetics problems and to compute the delayed neutron
+ fraction.
- This object represents data related to either one nuclide (as read from a data library)
- or an average over many nuclides (as computed after a delayed-neutron fraction calculation).
+ This object represents data related to either one nuclide (as read from a data library) or an
+ average over many nuclides (as computed after a delayed-neutron fraction calculation).
- For a problem with P precursor groups and G energy groups, delayed neutron precursor information includes:
+ For a problem with P precursor groups and G energy groups, delayed neutron precursor information
+ includes the three attributes of this class listed below.
Attributes
----------
precursorDecayConstants : array
- This is P-length list of decay constants in (1/s) that characterize the decay rates of the delayed
- neutron precursors. When a precursor decays, it emits a delayed neutron.
+        This is a P-length list of decay constants in (1/s) that characterize the decay rates of
+        the delayed neutron precursors. When a precursor decays, it emits a delayed neutron.
delayEmissionSpectrum : array
fraction of delayed neutrons emitted into each neutron energy group from each precursor family
@@ -58,18 +60,16 @@ class DelayedNeutronData:
Aka delayed-chi
delayNeutronsPerFission : array
- the multigroup number of delayed neutrons released per decay for each precursor group
- Note that this is equivalent to the number of delayed neutron precursors produced per fission in
- each family and energy group.
- Structure is identical to delayEmissionSpectrum. Aka delayed-nubar.
+ The multigroup number of delayed neutrons released per decay for each precursor group. Note
+ that this is equivalent to the number of delayed neutron precursors produced per fission in
+        each family and energy group. Structure is identical to delayEmissionSpectrum. Aka
+        delayed-nubar.
"""
def __init__(self, numEnergyGroups, numPrecursorGroups):
- self.precursorDecayConstants = numpy.zeros(numPrecursorGroups)
- self.delayEmissionSpectrum = numpy.zeros((numPrecursorGroups, numEnergyGroups))
- self.delayNeutronsPerFission = numpy.zeros(
- (numPrecursorGroups, numEnergyGroups)
- )
+ self.precursorDecayConstants = np.zeros(numPrecursorGroups)
+ self.delayEmissionSpectrum = np.zeros((numPrecursorGroups, numEnergyGroups))
+ self.delayNeutronsPerFission = np.zeros((numPrecursorGroups, numEnergyGroups))
def compare(lib1, lib2):
@@ -166,21 +166,11 @@ def G(self):
def generateAverageDelayedNeutronConstants(self):
"""
- Use externally-computed ``nuclideContributionFractions`` to produce an average ``DelayedNeutronData`` obj.
-
- Solves typical averaging equation but weights already sum to 1.0 so we
- can skip normalization at the end.
-
- Notes
- -----
- Long ago, the DLAYXS file had the same constants for each nuclide (!?) and this method
- simply took the first. Later, it was updated to take an importance- and abundance-weighted
- average of the values on the DLAYXS library.
+ Use externally-computed ``nuclideContributionFractions`` to produce an average
+ ``DelayedNeutronData`` object.
- A paper by Tuttle (1974) discusses some averaging but they end up saying that kinetics problems
- are mostly insensitive to the group constants ("errors of a few percent"). But in TWRs, we switch from U235 to Pu239
- and the difference may be important. We can try weighting by nuclide effective
- delayed neutron fractions beta_eff_nuclide/beta.
+ Solves typical averaging equation but weights already sum to 1.0 so we can skip
+ normalization at the end.
"""
avg = DelayedNeutronData(self.G, self.numPrecursorGroups)
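The averaging equation referred to here is a plain weighted sum over nuclides. A sketch under the stated assumption that the contribution fractions already sum to 1.0 (the numbers are illustrative only):

    import numpy as np

    fractions = {"U235": 0.6, "PU239": 0.4}  # externally computed, sum to 1.0
    decayConstants = {
        "U235": np.array([0.0127, 0.0317]),
        "PU239": np.array([0.0129, 0.0311]),
    }
    avg = sum(fractions[nuc] * decayConstants[nuc] for nuc in fractions)
    # avg == 0.6*[0.0127, 0.0317] + 0.4*[0.0129, 0.0311]; no renormalization needed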
@@ -274,7 +264,7 @@ def readWrite(self):
# if the data objects are empty, then we are reading, otherwise the data already exists...
# If we are reading, then we have to map all the metadata into the DelayedNeutronData structures
- if not numpy.any(list(self.dlayxs.values())[0].delayEmissionSpectrum):
+ if not np.any(list(self.dlayxs.values())[0].delayEmissionSpectrum):
for nuc, dlayData in self.dlayxs.items():
for ii, family in enumerate(self.dlayxs.nuclideFamily[nuc]):
dlayData.precursorDecayConstants[ii] = self.metadata[
diff --git a/armi/nuclearDataIO/cccc/fixsrc.py b/armi/nuclearDataIO/cccc/fixsrc.py
index 474255b34..abdedf611 100644
--- a/armi/nuclearDataIO/cccc/fixsrc.py
+++ b/armi/nuclearDataIO/cccc/fixsrc.py
@@ -20,7 +20,7 @@
"""
import collections
-import numpy
+import numpy as np
from armi import runLog
from armi.nuclearDataIO import cccc
@@ -28,7 +28,7 @@
def readBinary(fileName):
"""Read a binary FIXSRC file."""
- with FIXSRC(fileName, "rb", numpy.zeros((0, 0, 0, 0))) as fs:
+ with FIXSRC(fileName, "rb", np.zeros((0, 0, 0, 0))) as fs:
fs.readWrite()
return fs.fixSrc
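A hedged round-trip sketch of these module-level helpers (the file name and array contents are illustrative): writeBinary serializes a 4-D (i, j, z, group) source array, and readBinary recovers it.

    import numpy as np
    from armi.nuclearDataIO.cccc import fixsrc

    src = np.ones((2, 2, 3, 4))              # (ninti, nintj, nz, ngroups)
    fixsrc.writeBinary("gamma.fixsrc", src)  # illustrative file name
    roundTripped = fixsrc.readBinary("gamma.fixsrc")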
@@ -40,13 +40,16 @@ def writeBinary(fileName, fixSrcArray):
class FIXSRC(cccc.Stream):
- r"""Read or write a binary FIXSRC file from DIF3D fixed source input."""
+ """Read or write a binary FIXSRC file from DIF3D fixed source input."""
def __init__(self, fileName, fileMode, fixSrc):
- r"""
- Initialize a gamma FIXSRC class for reading or writing a binary FIXSRC file for DIF3D gamma fixed source input.
- If the intent is to write a gamma FIXSRC file, the variable FIXSRC.fixSrc, which contains to-be-written
- core-wide multigroup gamma fixed source data, is constructed from an existing neutron RTFLUX file.
+ """
+ Initialize a gamma FIXSRC class for reading or writing a binary FIXSRC file for DIF3D gamma
+ fixed source input.
+
+ If the intent is to write a gamma FIXSRC file, the variable FIXSRC.fixSrc, which contains
+ to-be-written core-wide multigroup gamma fixed source data, is constructed from an existing
+ neutron RTFLUX file.
Parameters
----------
@@ -57,20 +60,17 @@ def __init__(self, fileName, fileMode, fixSrc):
If 'wb', this class writes a FIXSRC binary file.
If 'rb', this class reads a preexisting FIXSRC binary file.
- o : Operator object, optional
- If fileMode='wb', an ARMI operator must be specified in order to construct gamma fixed source data
- from a neutron RTFLUX file (requires reactor geometry and settings).
-
+ fixSrc : np.ndarray
+ Core-wide multigroup gamma fixed-source data.
"""
cccc.Stream.__init__(self, fileName, fileMode)
# copied from a sample FIXSRC output from "type 19" DIF3D input
self.label = "FIXSRC "
self.fileId = 1
-
self.fixSrc = fixSrc
- ni, nj, nz, ng = self.fixSrc.shape
+ ni, nj, nz, ng = self.fixSrc.shape
self.fc = collections.OrderedDict(
[
("itype", 0),
@@ -90,7 +90,7 @@ def __init__(self, fileName, fileMode, fixSrc):
)
def readWrite(self):
- r"""Read or write a binary FIXSRC file for DIF3D fixed source input."""
+ """Read or write a binary FIXSRC file for DIF3D fixed source input."""
runLog.info(
"{} gamma fixed source file {}".format(
"Reading" if "r" in self._fileMode else "Writing", self
@@ -107,19 +107,19 @@ def readWrite(self):
self._rw3DRecord(g, z)
def _rwFileID(self):
- r"""Read file identification information."""
+ """Read file identification information."""
with self.createRecord() as fileIdRecord:
self.label = fileIdRecord.rwString(self.label, 24)
self.fileId = fileIdRecord.rwInt(self.fileId)
def _rw1DRecord(self):
- r"""Read/write parameters from/to the FIXSRC 1D block (file control)."""
+ """Read/write parameters from/to the FIXSRC 1D block (file control)."""
with self.createRecord() as record:
for var in self.fc.keys():
self.fc[var] = record.rwInt(self.fc[var])
def _rw3DRecord(self, g, z):
- r"""
+ """
Read/write fixed source data from 3D block records.
Parameters
@@ -129,10 +129,8 @@ def _rw3DRecord(self, g, z):
z : int
The DIF3D axial node index.
-
"""
with self.createRecord() as record:
-
ni = self.fc["ninti"]
nj = self.fc["nintj"]
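
For reference, the module-level helpers above make round-tripping a fixed source straightforward; a hedged usage sketch (the file name is hypothetical, and the (ni, nj, nz, ng) shape follows the unpacking in ``FIXSRC.__init__``):

import numpy as np
from armi.nuclearDataIO.cccc import fixsrc

# Core-wide multigroup gamma fixed source with shape (ni, nj, nz, ng).
src = np.zeros((3, 3, 2, 4), dtype=np.float32)
fixsrc.writeBinary("fixsrc.bin", src)           # hypothetical file name
roundTripped = fixsrc.readBinary("fixsrc.bin")  # returns the fixSrc array
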
diff --git a/armi/nuclearDataIO/cccc/gamiso.py b/armi/nuclearDataIO/cccc/gamiso.py
index af5e7f493..5c0b4132a 100644
--- a/armi/nuclearDataIO/cccc/gamiso.py
+++ b/armi/nuclearDataIO/cccc/gamiso.py
@@ -15,8 +15,8 @@
"""
Module for reading GAMISO files which contains gamma cross section data.
-GAMISO is a binary file created by MC**2-v3 that contains multigroup microscopic gamma cross sections. GAMISO data is
-contained within a :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.
+GAMISO is a binary file created by MC**2-v3 that contains multigroup microscopic gamma cross
+sections. GAMISO data is contained within a :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.
.. impl:: Tool to read and write GAMISO files.
:id: I_ARMI_NUCDATA_GAMISO
@@ -31,8 +31,9 @@
See [GAMSOR]_.
-.. [GAMSOR] Smith, M. A., Lee, C. H., and Hill, R. N. GAMSOR: Gamma Source Preparation and DIF3D Flux Solution. United States:
- N. p., 2016. Web. doi:10.2172/1343095. `On OSTI `_
+.. [GAMSOR] Smith, M. A., Lee, C. H., and Hill, R. N. GAMSOR: Gamma Source Preparation and DIF3D
+ Flux Solution. United States: N. p., 2016. Web. doi:10.2172/1343095. `On OSTI
+ <https://doi.org/10.2172/1343095>`__
"""
from armi import runLog
@@ -80,8 +81,8 @@ def addDummyNuclidesToLibrary(lib, dummyNuclides):
Notes
-----
- Since MC2-3 does not write DUMMY nuclide information for GAMISO files, this is necessary to provide a
- consistent set of nuclide-level data across all the nuclides in a
+ Since MC2-3 does not write DUMMY nuclide information for GAMISO files, this is necessary to
+ provide a consistent set of nuclide-level data across all the nuclides in a
:py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.
"""
if not dummyNuclides:
diff --git a/armi/nuclearDataIO/cccc/geodst.py b/armi/nuclearDataIO/cccc/geodst.py
index 2207f48e3..3515b5e2e 100644
--- a/armi/nuclearDataIO/cccc/geodst.py
+++ b/armi/nuclearDataIO/cccc/geodst.py
@@ -29,7 +29,7 @@
"""
-import numpy
+import numpy as np
from armi.nuclearDataIO import cccc
@@ -291,13 +291,13 @@ def _rw6DRecord(self):
if self._data.coarseMeshRegions is None:
# initialize all-zeros here before reading now that we
# have the matrix dimension metadata available.
- self._data.coarseMeshRegions = numpy.zeros(
+ self._data.coarseMeshRegions = np.zeros(
(
self._metadata["NCINTI"],
self._metadata["NCINTJ"],
self._metadata["NCINTK"],
),
- dtype=numpy.int16,
+ dtype=np.int16,
)
for ki in range(self._metadata["NCINTK"]):
with self.createRecord() as record:
@@ -312,13 +312,13 @@ def _rw7DRecord(self):
if self._data.fineMeshRegions is None:
# initialize all-zeros here before reading now that we
# have the matrix dimension metadata available.
- self._data.fineMeshRegions = numpy.zeros(
+ self._data.fineMeshRegions = np.zeros(
(
self._metadata["NINTI"],
self._metadata["NINTJ"],
self._metadata["NINTK"],
),
- dtype=numpy.int16,
+ dtype=np.int16,
)
for ki in range(self._metadata["NINTK"]):
with self.createRecord() as record:
diff --git a/armi/nuclearDataIO/cccc/isotxs.py b/armi/nuclearDataIO/cccc/isotxs.py
index 213529c0d..4005fc0f5 100644
--- a/armi/nuclearDataIO/cccc/isotxs.py
+++ b/armi/nuclearDataIO/cccc/isotxs.py
@@ -40,7 +40,7 @@
import traceback
import itertools
-import numpy
+import numpy as np
from scipy import sparse
from armi import runLog
@@ -327,7 +327,7 @@ def readWrite(self):
self._lib[nucLabel] = nuc
nuclideIO = self._getNuclideIO()(nuc, self, self._lib)
nuclideIO.rwNuclide()
- except: # noqa: bare-except
+ except Exception:
raise OSError(
"Failed to read/write {} \n\n\n{}".format(self, traceback.format_exc())
)
@@ -370,8 +370,8 @@ def _rw2DRecord(self, numNucs, nucNames):
Notes
-----
- Contains isotope names, global chi distribution, energy group structure, and locations of each nuclide record
- in the file
+ Contains isotope names, global chi distribution, energy group structure, and locations of
+ each nuclide record in the file
"""
with self.createRecord() as record:
# skip "merger test..." string
@@ -691,7 +691,7 @@ def _rw7DRecord(self, blockNumIndex, subBlock):
if scatter is None:
# we're reading.
scatter = sparse.csr_matrix(
- (numpy.array(dataVals), indices, indptr), shape=(ng, ng)
+ (np.array(dataVals), indices, indptr), shape=(ng, ng)
)
scatter.eliminate_zeros()
self._setScatterMatrix(blockNumIndex, scatter)
@@ -714,7 +714,7 @@ def _getScatterBlockNum(self, scatterType):
A index of the scatter matrix.
"""
try:
- return numpy.where(self._metadata["scatFlag"] == scatterType)[0][0]
+ return np.where(self._metadata["scatFlag"] == scatterType)[0][0]
except IndexError:
return None
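
The ``np.where`` pattern in ``_getScatterBlockNum`` above (first matching index, or ``None`` when there is no match) generalizes neatly; a self-contained sketch with a hypothetical flag array:

import numpy as np

scatFlag = np.array(["totl", "elas", "inel"])  # hypothetical scattering flags

def getBlockNum(scatterType):
    # Index of the first flag equal to scatterType, or None if absent.
    try:
        return np.where(scatFlag == scatterType)[0][0]
    except IndexError:
        return None

assert getBlockNum("elas") == 1
assert getBlockNum("n2n") is None
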
diff --git a/armi/nuclearDataIO/cccc/pmatrx.py b/armi/nuclearDataIO/cccc/pmatrx.py
index 9bdbe5f25..2ae73ceff 100644
--- a/armi/nuclearDataIO/cccc/pmatrx.py
+++ b/armi/nuclearDataIO/cccc/pmatrx.py
@@ -17,9 +17,10 @@
See [GAMSOR]_ and [MC23]_.
-.. [MC23] Lee, Changho, Jung, Yeon Sang, and Yang, Won Sik. MC2-3: Multigroup Cross Section Generation Code for Fast Reactor
- Analysis Nuclear. United States: N. p., 2018. Web. doi:10.2172/1483949.
- (`OSTI `_)
+.. [MC23] Lee, Changho, Jung, Yeon Sang, and Yang, Won Sik. MC2-3: Multigroup Cross Section
+ Generation Code for Fast Reactor Analysis Nuclear. United States: N. p., 2018. Web.
+ doi:10.2172/1483949. (`OSTI
+ <https://doi.org/10.2172/1483949>`__)
"""
import traceback
@@ -148,12 +149,16 @@ def _read(fileName, fileMode):
def writeBinary(lib, fileName):
- """Write the PMATRX data from an :py:class:`~armi.nuclearDataIO.xsLibraries.IsotxsLibrary` object to a binary file."""
+ """Write the PMATRX data from an :py:class:`~armi.nuclearDataIO.xsLibraries.IsotxsLibrary`
+ object to a binary file.
+ """
return _write(lib, fileName, "wb")
def writeAscii(lib, fileName):
- """Write the PMATRX data from an :py:class:`~armi.nuclearDataIO.xsLibraries.IsotxsLibrary` object to an ASCII file."""
+ """Write the PMATRX data from an :py:class:`~armi.nuclearDataIO.xsLibraries.IsotxsLibrary`
+ object to an ASCII file.
+ """
return _write(lib, fileName, "w")
@@ -164,6 +169,7 @@ def _write(lib, fileName, fileMode):
def _readWrite(lib, fileName, fileMode, getNuclideFunc):
with PmatrxIO(fileName, lib, fileMode, getNuclideFunc) as rw:
rw.readWrite()
+
return lib
@@ -216,7 +222,7 @@ def readWrite(self):
self._rwGroupStructure()
self._rwDoseConversionFactor()
self._rwIsotopes(numNucs)
- except: # noqa: bare-except
+ except Exception:
runLog.error(traceback.format_exc())
raise OSError("Failed to read/write {}".format(self))
finally:
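
The PMATRX writers above take the library object first and the target path second; a round-trip sketch (file names hypothetical):

from armi.nuclearDataIO.cccc import pmatrx

lib = pmatrx.readBinary("PMATRX")        # read an existing binary PMATRX file
pmatrx.writeAscii(lib, "PMATRX.ascii")   # same data, human-readable form
pmatrx.writeBinary(lib, "PMATRX.bin")    # binary copy
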
diff --git a/armi/nuclearDataIO/cccc/pwdint.py b/armi/nuclearDataIO/cccc/pwdint.py
index 1505ff094..7d1409a36 100644
--- a/armi/nuclearDataIO/cccc/pwdint.py
+++ b/armi/nuclearDataIO/cccc/pwdint.py
@@ -25,7 +25,7 @@
"""
-import numpy
+import numpy as np
from armi.nuclearDataIO import cccc
@@ -54,7 +54,7 @@ class PwdintData(cccc.DataContainer):
def __init__(self):
cccc.DataContainer.__init__(self)
- self.powerDensity = numpy.array([])
+ self.powerDensity = np.array([])
class PwdintStream(cccc.StreamWithDataContainer):
@@ -111,9 +111,9 @@ def _rw2DRecord(self):
if self._data.powerDensity.size == 0:
# initialize all-zeros here before reading now that we
# have the matrix dimension metadata available.
- self._data.powerDensity = numpy.zeros(
+ self._data.powerDensity = np.zeros(
(imax, jmax, kmax),
- dtype=numpy.float32,
+ dtype=np.float32,
)
for ki in range(kmax):
for bi in range(nblck):
diff --git a/armi/nuclearDataIO/cccc/rtflux.py b/armi/nuclearDataIO/cccc/rtflux.py
index 39aa86707..30bdec2ad 100644
--- a/armi/nuclearDataIO/cccc/rtflux.py
+++ b/armi/nuclearDataIO/cccc/rtflux.py
@@ -34,7 +34,7 @@
RZFLUX
Reads/writes total fluxes from zones
"""
-import numpy
+import numpy as np
from armi.nuclearDataIO import cccc
@@ -67,7 +67,7 @@ class RtfluxData(cccc.DataContainer):
def __init__(self):
cccc.DataContainer.__init__(self)
- self.groupFluxes: numpy.ndarray = numpy.array([])
+ self.groupFluxes: np.ndarray = np.array([])
"""Maps i,j,k,g indices to total real or adjoint flux in n/cm^2-s"""
@@ -138,7 +138,7 @@ def _rw3DRecord(self):
nblck = self._metadata["NBLOK"]
if self._data.groupFluxes.size == 0:
- self._data.groupFluxes = numpy.zeros((imax, jmax, kmax, ng))
+ self._data.groupFluxes = np.zeros((imax, jmax, kmax, ng))
for gi in range(ng):
gEff = self.getEnergyGroupIndex(gi)
diff --git a/armi/nuclearDataIO/cccc/rzflux.py b/armi/nuclearDataIO/cccc/rzflux.py
index c5692f41e..851fd6d4d 100644
--- a/armi/nuclearDataIO/cccc/rzflux.py
+++ b/armi/nuclearDataIO/cccc/rzflux.py
@@ -28,7 +28,7 @@
"""
from enum import Enum
-import numpy
+import numpy as np
from armi.nuclearDataIO import cccc
@@ -144,9 +144,9 @@ def _rw2DRecord(self):
if self._data.groupFluxes is None:
# initialize all-zeros here before reading now that we
# have the matrix dimension metadata available.
- self._data.groupFluxes = numpy.zeros(
+ self._data.groupFluxes = np.zeros(
(ng, nz),
- dtype=numpy.float32,
+ dtype=np.float32,
)
for bi in range(nb):
jLow, jUp = cccc.getBlockBandwidth(bi + 1, nz, nb)
diff --git a/armi/nuclearDataIO/cccc/tests/test_compxs.py b/armi/nuclearDataIO/cccc/tests/test_compxs.py
index efcdcd71e..ca0329e4d 100644
--- a/armi/nuclearDataIO/cccc/tests/test_compxs.py
+++ b/armi/nuclearDataIO/cccc/tests/test_compxs.py
@@ -16,7 +16,7 @@
import os
import unittest
-import numpy
+import numpy as np
from scipy.sparse import csc_matrix
from armi import nuclearDataIO
@@ -149,7 +149,7 @@ def test_regionPrimaryXS(self):
}
for xsName, expectedXS in expectedMacros.items():
actualXS = self.fissileRegion.macros[xsName]
- self.assertTrue(numpy.allclose(actualXS, expectedXS))
+ self.assertTrue(np.allclose(actualXS, expectedXS))
def test_totalScatterMatrix(self):
"""
@@ -164,7 +164,7 @@ def test_totalScatterMatrix(self):
--------
scipy.sparse.csc_matrix
"""
- expectedSparseData = numpy.array(
+ expectedSparseData = np.array(
[
1.15905297e-01,
1.50461698e-01,
@@ -290,7 +290,7 @@ def test_totalScatterMatrix(self):
actualTotalScatter.shape,
).toarray()
- self.assertTrue(numpy.allclose(actualTotalScatter, expectedTotalScatter))
+ self.assertTrue(np.allclose(actualTotalScatter, expectedTotalScatter))
def test_binaryRW(self):
"""Test to make sure the binary read/writer reads/writes the exact same library."""
diff --git a/armi/nuclearDataIO/cccc/tests/test_dlayxs.py b/armi/nuclearDataIO/cccc/tests/test_dlayxs.py
index 929b7450b..08cc08d31 100644
--- a/armi/nuclearDataIO/cccc/tests/test_dlayxs.py
+++ b/armi/nuclearDataIO/cccc/tests/test_dlayxs.py
@@ -17,7 +17,7 @@
import filecmp
import unittest
-import numpy
+import numpy as np
from armi.nucDirectory import nuclideBases
from armi.nuclearDataIO.cccc import dlayxs
@@ -40,13 +40,13 @@ def test_decayConstants(self):
"""
delay = self.dlayxs3
self.assertTrue(
- numpy.allclose(
+ np.allclose(
delay[nuclideBases.byName["PU239"]].precursorDecayConstants,
[0.013271, 0.030881, 0.11337, 0.29249999, 0.85749, 2.72970009],
)
)
self.assertTrue(
- numpy.allclose(
+ np.allclose(
delay[nuclideBases.byName["U235"]].precursorDecayConstants,
[0.013336, 0.032739, 0.12078, 0.30278, 0.84948999, 2.85299993],
)
@@ -56,7 +56,7 @@ def test_chi_delay(self):
"""Test that all emission spectrum delayEmissionSpectrum is normalized."""
delay = self.dlayxs3
self.assertTrue(
- numpy.allclose(
+ np.allclose(
delay[nuclideBases.byName["PU239"]].delayEmissionSpectrum[0, :],
[
0.00000000e00,
@@ -96,7 +96,7 @@ def test_chi_delay(self):
)
)
self.assertTrue(
- numpy.allclose(
+ np.allclose(
delay[nuclideBases.byName["U235"]].delayEmissionSpectrum[0, :],
[
0.00000000e00,
@@ -146,7 +146,7 @@ def test_NuDelay(self):
# was used to make sure the data wasn't accidentally transposed, hence there are two nuclides with two vectors
# being tested.
self.assertTrue(
- numpy.allclose(
+ np.allclose(
delay[nuclideBases.byName["PU239"]].delayNeutronsPerFission[0, :],
[
0.00015611,
@@ -186,7 +186,7 @@ def test_NuDelay(self):
)
)
self.assertTrue(
- numpy.allclose(
+ np.allclose(
delay[nuclideBases.byName["PU239"]].delayNeutronsPerFission[1, :],
[
0.00101669,
@@ -226,7 +226,7 @@ def test_NuDelay(self):
)
)
self.assertTrue(
- numpy.allclose(
+ np.allclose(
delay[nuclideBases.byName["U235"]].delayNeutronsPerFission[0, :],
[
0.000315,
@@ -266,7 +266,7 @@ def test_NuDelay(self):
)
)
self.assertTrue(
- numpy.allclose(
+ np.allclose(
delay[nuclideBases.byName["U235"]].delayNeutronsPerFission[1, :],
[
0.0016254,
@@ -923,9 +923,10 @@ def _assertDC(self, nucName, endfProvidedData):
dlayData = self.dlayxs3[
nuclideBases.byName[nucName.strip()]
].precursorDecayConstants
- self.assertTrue(numpy.allclose(dlayData, endfProvidedData, 1e-3))
+ self.assertTrue(np.allclose(dlayData, endfProvidedData, 1e-3))
except AssertionError:
- # this is reraised because generating the message might take some time to format all the data from the arrays
+ # this is reraised because generating the message might take some time to format all the
+ # data from the arrays
raise AssertionError(
"{} was different,\nexpected:{}\nactual:{}".format(
nucName, endfProvidedData, dlayData
@@ -935,7 +936,8 @@ def _assertDC(self, nucName, endfProvidedData):
pass
@unittest.skip(
- "All the delayNeutronsPerFission data from mcc-v3 does not agree, this may be because they are from ENDV/B VI.8."
+ "All the delayNeutronsPerFission data from mcc-v3 does not agree, this may be because they "
+ "are from ENDV/B VI.8."
)
def test_ENDFVII1NeutronsPerFission(self):
"""
@@ -1059,13 +1061,14 @@ def _assertNuDelay(self, nucName, endfProvidedData):
dlayData = self.dlayxs3[
nuclideBases.byName[nucName.strip()]
].delayNeutronsPerFission
- numpyData = numpy.array(endfProvidedData)
- self.assertTrue(numpy.allclose(dlayData, numpyData, 1e-3))
+ npData = np.array(endfProvidedData)
+ self.assertTrue(np.allclose(dlayData, npData, 1e-3))
except AssertionError:
- # this is reraised because generating the message might take some time to format all the data from the arrays
+ # this is reraised because generating the message might take some time to format all the
+ # data from the arrays
raise AssertionError(
"{} was different,\nexpected:{}\nactual:{}".format(
- nucName, numpyData, dlayData
+ nucName, npData, dlayData
)
)
except KeyError:
@@ -1114,12 +1117,12 @@ def test_avg(self):
with self.assertRaises(RuntimeError):
_avg = self.dlayxs3.generateAverageDelayedNeutronConstants()
- fracs = dict(zip(self.dlayxs3.keys(), numpy.zeros(len(self.dlayxs3))))
+ fracs = dict(zip(self.dlayxs3.keys(), np.zeros(len(self.dlayxs3))))
u235 = nuclideBases.byName["U235"]
fracs[u235] = 1.0
self.dlayxs3.nuclideContributionFractions = fracs
avg = self.dlayxs3.generateAverageDelayedNeutronConstants()
dlayU235 = self.dlayxs3[u235]
self.assertTrue(
- numpy.allclose(avg.delayEmissionSpectrum, dlayU235.delayEmissionSpectrum)
+ np.allclose(avg.delayEmissionSpectrum, dlayU235.delayEmissionSpectrum)
)
diff --git a/armi/nuclearDataIO/cccc/tests/test_fixsrc.py b/armi/nuclearDataIO/cccc/tests/test_fixsrc.py
new file mode 100644
index 000000000..531b3036f
--- /dev/null
+++ b/armi/nuclearDataIO/cccc/tests/test_fixsrc.py
@@ -0,0 +1,41 @@
+# Copyright 2024 TerraPower, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Test the reading and writing of the DIF3D FIXSRC file format."""
+import numpy as np
+import os
+import unittest
+
+from armi.nuclearDataIO.cccc import fixsrc
+from armi.utils.directoryChangers import TemporaryDirectoryChanger
+
+# ruff: noqa: E501
+FIXSRC_ASCII = """0 0 0 0 0 0 0.4008E+10 0.4210E+10 0.4822E+10 0.5154E+10 0.4926E+10 0.4621E+10
+0.4246E+10 0.3757E+10 0.3311E+10 0.3479E+10 0.357E+10 0.324E+10 0.2942E+10 0.2903E+10 0.2925E+10 0.2763E+10 0.2414E+10 0.2036E+10
+0.1656E+10 0.1477E+10 0.1455E+10 0.1434E+10 0.1297E+10 0.1153E+10 0.101E+10 0.8841E+9 0.7923E+9 0.7266E+9 0.6575E+9 0.589E+9
+0.5027E+9 0.4146E+9 0.3474E+9 0.3015E+9 0.2403E+9 0.2356E+9 0.1634E+9 0.1521E+9 0.1258E+9 0.9032E+8 0.6156E+8 0.3983E+8
+0.3134E+8 0.303E+8 0.2983E+8 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0"""
+FIXSRC_ARRAY = np.array(FIXSRC_ASCII.split(), dtype=np.float32).reshape((3, 3, 2, 4))
+
+
+class TestFixsrc(unittest.TestCase):
+ def test_writeReadBinaryLoop(self):
+ with TemporaryDirectoryChanger() as newDir:
+ fileName = "fixsrc_writeBinary.bin"
+ binaryFilePath = os.path.join(newDir.destination, fileName)
+ fixsrc.writeBinary(binaryFilePath, FIXSRC_ARRAY)
+
+ self.assertIn(fileName, os.listdir(newDir.destination))
+ self.assertGreater(os.path.getsize(binaryFilePath), 0)
diff --git a/armi/nuclearDataIO/tests/test_xsLibraries.py b/armi/nuclearDataIO/tests/test_xsLibraries.py
index 35d7f6066..324206e9e 100644
--- a/armi/nuclearDataIO/tests/test_xsLibraries.py
+++ b/armi/nuclearDataIO/tests/test_xsLibraries.py
@@ -18,13 +18,12 @@
import traceback
import unittest
+import numpy as np
from six.moves import cPickle
from armi.nucDirectory import nuclideBases
from armi.nuclearDataIO import xsLibraries
-from armi.nuclearDataIO.cccc import gamiso
-from armi.nuclearDataIO.cccc import isotxs
-from armi.nuclearDataIO.cccc import pmatrx
+from armi.nuclearDataIO.cccc import gamiso, isotxs, pmatrx
from armi.tests import mockRunLogs
from armi.utils import properties
from armi.utils.directoryChangers import TemporaryDirectoryChanger
@@ -54,8 +53,8 @@
UFG_FLUX_EDIT = os.path.join(FIXTURE_DIR, "mc2v3-AA.flux_ufg")
-class TempFileMixin(unittest.TestCase):
- """really a test case."""
+class TempFileMixin:
+ """Not a test; just helpful test tooling."""
def setUp(self):
self.td = TemporaryDirectoryChanger()
@@ -72,7 +71,7 @@ def testFileName(self):
)
-class TestXSLibrary(TempFileMixin):
+class TestXSLibrary(TempFileMixin, unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.isotxsAA = isotxs.readBinary(ISOTXS_AA)
@@ -84,7 +83,7 @@ def setUpClass(cls):
cls.xsLib.merge(copy.deepcopy(cls.isotxsAA))
cls.xsLib.merge(copy.deepcopy(cls.gamisoAA))
cls.xsLib.merge(copy.deepcopy(cls.pmatrxAA))
- except: # noqa: bare-except
+ except Exception:
cls.xsLibGenerationErrorStack = traceback.format_exc()
def test_canPickleAndUnpickleISOTXS(self):
@@ -229,7 +228,7 @@ def test_canWritePmatrxFromCombinedXSLibrary(self):
def _canWritefromCombined(self, writer, refFile):
if self.xsLibGenerationErrorStack is not None:
print(self.xsLibGenerationErrorStack)
- raise Exception("see stdout for stack trace")
+ raise Exception("See stdout for stack trace")
# check to make sure they labels overlap... or are actually the same
writer.writeBinary(self.xsLib, self.testFileName)
self.assertTrue(filecmp.cmp(refFile, self.testFileName))
@@ -288,40 +287,23 @@ def assert_contains_only(self, container, shouldBeThere, shouldNotBeThere):
self.assertEqual(set(), container & set(shouldNotBeThere))
-# NOTE: This is just a base class, so it isn't run directly.
-class TestXSlibraryMerging(TempFileMixin):
- """A shared class that defines tests that should be true for all IsotxsLibrary merging."""
+class AbstractTestXSlibraryMerging(TempFileMixin):
+ """
+ A shared class that defines tests that should be true for all IsotxsLibrary merging.
- @classmethod
- def setUpClass(cls):
- cls.libAA = None
- cls.libAB = None
- cls.libCombined = None
- cls.libLumped = None
-
- @classmethod
- def tearDownClass(cls):
- cls.libAA = None
- cls.libAB = None
- cls.libCombined = None
- cls.libLumped = None
- del cls.libAA
- del cls.libAB
- del cls.libCombined
- del cls.libLumped
+ Notes
+ -----
+ This is just a base class; it isn't run directly.
+ """
def setUp(self):
TempFileMixin.setUp(self)
- # load a library that is in the ARMI tree. This should
- # be a small library with LFPs, Actinides, structure, and coolant
- for attrName, path in [
- ("libAA", self.getLibAAPath),
- ("libAB", self.getLibABPath),
- ("libCombined", self.getLibAA_ABPath),
- ("libLumped", self.getLibLumpedPath),
- ]:
- if getattr(self.__class__, attrName) is None:
- setattr(self.__class__, attrName, self.getReadFunc()(path()))
+ # Load a library that is in the ARMI tree. This should be a small library with LFPs,
+ # actinides, structure, and coolant.
+ self.libAA = self.getReadFunc()(self.getLibAAPath())
+ self.libAB = self.getReadFunc()(self.getLibABPath())
+ self.libCombined = self.getReadFunc()(self.getLibAA_ABPath())
+ self.libLumped = self.getReadFunc()(self.getLibLumpedPath())
def getErrorType(self):
raise NotImplementedError()
@@ -356,24 +338,24 @@ def test_cannotMergeXSLibWithSameNuclideNames(self):
def test_cannotMergeXSLibxWithDifferentGroupStructure(self):
dummyXsLib = xsLibraries.IsotxsLibrary()
- dummyXsLib.neutronEnergyUpperBounds = [1, 2, 3]
- dummyXsLib.gammaEnergyUpperBounds = [1, 2, 3]
+ dummyXsLib.neutronEnergyUpperBounds = np.array([1, 2, 3])
+ dummyXsLib.gammaEnergyUpperBounds = np.array([1, 2, 3])
with self.assertRaises(properties.ImmutablePropertyError):
dummyXsLib.merge(self.libCombined)
def test_mergeEmptyXSLibWithOtherEssentiallyClonesTheOther(self):
emptyXSLib = xsLibraries.IsotxsLibrary()
emptyXSLib.merge(self.libAA)
- self.__class__.libAA = None
+ self.libAA = None
self.getWriteFunc()(emptyXSLib, self.testFileName)
self.assertTrue(filecmp.cmp(self.getLibAAPath(), self.testFileName))
def test_mergeTwoXSLibFiles(self):
emptyXSLib = xsLibraries.IsotxsLibrary()
emptyXSLib.merge(self.libAA)
- self.__class__.libAA = None
+ self.libAA = None
emptyXSLib.merge(self.libAB)
- self.__class__.libAB = None
+ self.libAB = None
self.assertEqual(
set(self.libCombined.nuclideLabels), set(emptyXSLib.nuclideLabels)
)
@@ -384,9 +366,9 @@ def test_mergeTwoXSLibFiles(self):
def test_canRemoveIsotopes(self):
emptyXSLib = xsLibraries.IsotxsLibrary()
emptyXSLib.merge(self.libAA)
- self.__class__.libAA = None
+ self.libAA = None
emptyXSLib.merge(self.libAB)
- self.__class__.libAB = None
+ self.libAB = None
for nucId in [
"ZR93_7",
"ZR95_7",
@@ -410,7 +392,7 @@ def test_canRemoveIsotopes(self):
self.assertTrue(filecmp.cmp(self.getLibLumpedPath(), self.testFileName))
-class Pmatrx_merge_Tests(TestXSlibraryMerging):
+class Pmatrx_Merge_Tests(AbstractTestXSlibraryMerging, unittest.TestCase):
def getErrorType(self):
return OSError
@@ -439,12 +421,12 @@ def test_canRemoveIsotopes(self):
def test_cannotMergeXSLibsWithDifferentGammaGroupStructures(self):
dummyXsLib = xsLibraries.IsotxsLibrary()
- dummyXsLib.gammaEnergyUpperBounds = [1, 2, 3]
+ dummyXsLib.gammaEnergyUpperBounds = np.array([1, 2, 3])
with self.assertRaises(properties.ImmutablePropertyError):
dummyXsLib.merge(self.libCombined)
-class Isotxs_merge_Tests(TestXSlibraryMerging):
+class Isotxs_Merge_Tests(AbstractTestXSlibraryMerging, unittest.TestCase):
def getErrorType(self):
return OSError
@@ -467,7 +449,7 @@ def getLibLumpedPath(self):
return ISOTXS_LUMPED
-class Gamiso_merge_Tests(TestXSlibraryMerging):
+class Gamiso_Merge_Tests(AbstractTestXSlibraryMerging, unittest.TestCase):
def getErrorType(self):
return OSError
@@ -490,48 +472,17 @@ def getLibLumpedPath(self):
return GAMISO_LUMPED
-class Combined_merge_Tests(unittest.TestCase):
- @classmethod
- def setUpClass(cls):
- cls.isotxsAA = None
- cls.isotxsAB = None
- cls.gamisoAA = None
- cls.gamisoAB = None
- cls.pmatrxAA = None
- cls.pmatrxAB = None
- cls.libCombined = None
-
- @classmethod
- def tearDownClass(cls):
- cls.isotxsAA = None
- cls.isotxsAB = None
- cls.gamisoAA = None
- cls.gamisoAB = None
- cls.pmatrxAA = None
- cls.pmatrxAB = None
- cls.libCombined = None
- del cls.isotxsAA
- del cls.isotxsAB
- del cls.gamisoAA
- del cls.gamisoAB
- del cls.pmatrxAA
- del cls.pmatrxAB
- del cls.libCombined
-
+class Combined_Merge_Tests(unittest.TestCase):
def setUp(self):
- # load a library that is in the ARMI tree. This should
- # be a small library with LFPs, Actinides, structure, and coolant
- for attrName, path, readFunc in [
- ("isotxsAA", ISOTXS_AA, isotxs.readBinary),
- ("gamisoAA", GAMISO_AA, gamiso.readBinary),
- ("pmatrxAA", PMATRX_AA, pmatrx.readBinary),
- ("isotxsAB", ISOTXS_AB, isotxs.readBinary),
- ("gamisoAB", GAMISO_AB, gamiso.readBinary),
- ("pmatrxAB", PMATRX_AB, pmatrx.readBinary),
- ("libCombined", ISOTXS_AA_AB, isotxs.readBinary),
- ]:
- if getattr(self.__class__, attrName) is None:
- setattr(self.__class__, attrName, readFunc(path))
+ # Load a library that is in the ARMI tree. This should be a small library with LFPs,
+ # actinides, structure, and coolant.
+ self.isotxsAA = isotxs.readBinary(ISOTXS_AA)
+ self.gamisoAA = gamiso.readBinary(GAMISO_AA)
+ self.pmatrxAA = pmatrx.readBinary(PMATRX_AA)
+ self.isotxsAB = isotxs.readBinary(ISOTXS_AB)
+ self.gamisoAB = gamiso.readBinary(GAMISO_AB)
+ self.pmatrxAB = pmatrx.readBinary(PMATRX_AB)
+ self.libCombined = isotxs.readBinary(ISOTXS_AA_AB)
def test_mergeAllXSLibFiles(self):
lib = xsLibraries.IsotxsLibrary()
@@ -539,7 +490,3 @@ def test_mergeAllXSLibFiles(self):
lib, xsLibrarySuffix="", mergeGammaLibs=True, alternateDirectory=FIXTURE_DIR
)
self.assertEqual(set(lib.nuclideLabels), set(self.libCombined.nuclideLabels))
-
-
-# Remove the abstract class, so that it does not run (all tests would fail)
-del TestXSlibraryMerging
diff --git a/armi/nuclearDataIO/xsCollections.py b/armi/nuclearDataIO/xsCollections.py
index ed1a70f6f..faf7978ba 100644
--- a/armi/nuclearDataIO/xsCollections.py
+++ b/armi/nuclearDataIO/xsCollections.py
@@ -15,11 +15,12 @@
"""
Cross section collections contain cross sections for a single nuclide or region.
-Specifically, they are used as attributes of :py:class:`~armi.nuclearDataIO.xsNuclides.XSNuclide`, which
-then are combined as a :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.
+Specifically, they are used as attributes of :py:class:`~armi.nuclearDataIO.xsNuclides.XSNuclide`,
+which then are combined as a :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.
-These may represent microscopic or macroscopic neutron or photon cross sections. When they are macroscopic,
-they generally represent a whole region with many nuclides, though this is not required.
+These may represent microscopic or macroscopic neutron or photon cross sections. When they are
+macroscopic, they generally represent a whole region with many nuclides, though this is not
+required.
See Also
--------
@@ -37,7 +38,7 @@
blocksWithMacros = mc.createMacrosOnBlocklist(microLib, blocks)
"""
-import numpy
+import numpy as np
from scipy import sparse
from armi import runLog
@@ -115,7 +116,7 @@ class XSCollection:
def getDefaultXs(cls, numGroups):
default = cls._zeroes.get(numGroups, None)
if default is None:
- default = numpy.zeros(numGroups)
+ default = np.zeros(numGroups)
cls._zeroes[numGroups] = default
return default
@@ -232,8 +233,8 @@ def clear(self):
# it should either be a list, a numpy array, or a sparse matrix
if isinstance(value, list):
value = [0.0] * len(value)
- elif isinstance(value, numpy.ndarray):
- value = numpy.zeros(value.shape)
+ elif isinstance(value, np.ndarray):
+ value = np.zeros(value.shape)
elif value is None: # assume it is scipy.sparse
pass
elif value.nnz >= 0:
@@ -267,7 +268,7 @@ def collapseCrossSection(crossSection, weights):
oneGroupXS : float
The one group cross section in the same units as the input cross section.
"""
- mult = numpy.array(crossSection) * numpy.array(weights)
+ mult = np.array(crossSection) * np.array(weights)
return sum(mult) / sum(weights)
def compare(self, other, flux, relativeTolerance=0, verbose=False):
@@ -288,7 +289,7 @@ def compare(self, other, flux, relativeTolerance=0, verbose=False):
)
elif sparse.issparse(myXsData) and sparse.issparse(theirXsData):
- if not numpy.allclose(
+ if not np.allclose(
myXsData.todense(),
theirXsData.todense(),
rtol=relativeTolerance,
@@ -447,7 +448,7 @@ def createMacrosFromMicros(
def _initializeMacros(self):
m = self.macros
for xsName in BASIC_XS + DERIVED_XS:
- setattr(m, xsName, numpy.zeros(self.ng))
+ setattr(m, xsName, np.zeros(self.ng))
for matrixName in BASIC_SCAT_MATRIX:
# lil_matrices are good for indexing but bad for certain math operations.
@@ -542,6 +543,7 @@ def _computeRemovalXS(self):
self.macros.removal += columnSum - diags
+# ruff: noqa: E501
def computeBlockAverageChi(b, isotxsLib):
r"""
Return the block average total chi vector based on isotope chi vectors.
@@ -572,7 +574,7 @@ def computeBlockAverageChi(b, isotxsLib):
fission source weighting).
"""
numGroups = isotxsLib.numGroups
- numerator = numpy.zeros(numGroups)
+ numerator = np.zeros(numGroups)
denominator = 0.0
numberDensities = b.getNumberDensities()
for nucObj in isotxsLib.getNuclides(b.getMicroSuffix()):
@@ -584,7 +586,7 @@ def computeBlockAverageChi(b, isotxsLib):
if denominator != 0.0:
return numerator / denominator
else:
- return numpy.zeros(numGroups)
+ return np.zeros(numGroups)
def _getLibTypeSuffix(libType):
@@ -623,7 +625,7 @@ def computeNeutronEnergyDepositionConstants(numberDensities, lib, microSuffix):
Returns
-------
- energyDepositionConsts : numpy array
+ energyDepositionConsts : np.ndarray
Neutron energy deposition group constants. (J/cm)
Notes
@@ -662,7 +664,7 @@ def computeGammaEnergyDepositionConstants(numberDensities, lib, microSuffix):
Returns
-------
- energyDepositionConsts : numpy array
+ energyDepositionConsts : np.ndarray
gamma energy deposition group constants. (J/cm)
Notes
@@ -705,7 +707,7 @@ def computeFissionEnergyGenerationConstants(numberDensities, lib, microSuffix):
Returns
-------
- fissionEnergyFactor: numpy.array
+ fissionEnergyFactor: np.ndarray
Fission energy generation group constants (in Joules/cm)
"""
fissionEnergyFactor = computeMacroscopicGroupConstants(
@@ -749,14 +751,14 @@ def computeCaptureEnergyGenerationConstants(numberDensities, lib, microSuffix):
Returns
-------
- captureEnergyFactor: numpy.array
+ captureEnergyFactor: np.ndarray
Capture energy generation group constants (in Joules/cm)
"""
captureEnergyFactor = None
for xs in CAPTURE_XS:
if captureEnergyFactor is None:
- captureEnergyFactor = numpy.zeros(
- numpy.shape(
+ captureEnergyFactor = np.zeros(
+ np.shape(
computeMacroscopicGroupConstants(
xs, numberDensities, lib, microSuffix, libType="micros"
)
@@ -827,33 +829,27 @@ def computeMacroscopicGroupConstants(
constantName : str
Name of the reaction for which to obtain the group constants. This name should match a
cross section name or an attribute in the collection.
-
numberDensities : dict
nucName keys, number density values (atoms/bn-cm) of all nuclides in the composite for which
the macroscopic group constants are computed. See composite `getNuclideNumberDensities` method.
-
lib : library object
Microscopic cross section library.
-
microSuffix : str
Microscopic library suffix (e.g. 'AB') for this composite.
See composite `getMicroSuffix` method.
-
libType : str, optional
The block attribute containing the desired microscopic XS for this block:
either "micros" for neutron XS or "gammaXS" for gamma XS.
-
multConstant : str, optional
Name of constant by which the group constants will be multiplied. This name should match a
cross section name or an attribute in the collection.
-
multLib : library object, optional
Microscopic cross section nuclide library to obtain the multiplier from.
If None, same library as base cross section is used.
Returns
-------
- macroGroupConstant : numpy array
+ macroGroupConstant : np.ndarray
Macroscopic group constants for the requested reaction.
"""
skippedNuclides = []
@@ -888,16 +884,16 @@ def computeMacroscopicGroupConstants(
multiplierVal = _getXsMultiplier(multLibNuclide, multConstant, libType)
if macroGroupConstants is None:
- macroGroupConstants = numpy.zeros(microGroupConstants.shape)
+ macroGroupConstants = np.zeros(microGroupConstants.shape)
if (
microGroupConstants.shape != macroGroupConstants.shape
and not microGroupConstants.any()
):
- microGroupConstants = numpy.zeros(macroGroupConstants.shape)
+ microGroupConstants = np.zeros(macroGroupConstants.shape)
macroGroupConstants += (
- numpy.asarray(numberDensity) * microGroupConstants * multiplierVal
+ np.asarray(numberDensity) * microGroupConstants * multiplierVal
)
if skippedNuclides:
@@ -923,12 +919,12 @@ def _getXsMultiplier(libNuclide, multiplier, libType):
try:
microCollection = getattr(libNuclide, libType)
multiplierVal = getattr(microCollection, multiplier)
- except: # noqa: bare-except
+ except Exception:
multiplierVal = libNuclide.isotxsMetadata[multiplier]
else:
multiplierVal = 1.0
- return numpy.asarray(multiplierVal)
+ return np.asarray(multiplierVal)
def _getMicroGroupConstants(libNuclide, constantName, nuclideName, libType):
@@ -937,7 +933,7 @@ def _getMicroGroupConstants(libNuclide, constantName, nuclideName, libType):
else:
microCollection = libNuclide
- microGroupConstants = numpy.asarray(getattr(microCollection, constantName))
+ microGroupConstants = np.asarray(getattr(microCollection, constantName))
if not microGroupConstants.any():
runLog.debug(
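
Among the helpers touched above, ``XSCollection.collapseCrossSection`` is just a flux-weighted one-group collapse; a tiny worked example with made-up numbers:

import numpy as np

xs = np.array([2.0, 1.0, 0.5])    # multigroup cross section, assumed values
flux = np.array([0.2, 0.5, 0.3])  # group fluxes used as weights

# Same math as collapseCrossSection: sum(xs * flux) / sum(flux)
oneGroup = (xs * flux).sum() / flux.sum()  # (0.4 + 0.5 + 0.15) / 1.0 = 1.05
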
diff --git a/armi/operators/operator.py b/armi/operators/operator.py
index 1d567e6bf..09df06b3a 100644
--- a/armi/operators/operator.py
+++ b/armi/operators/operator.py
@@ -34,12 +34,12 @@
from armi import runLog
from armi.bookkeeping import memoryProfiler
from armi.bookkeeping.report import reportingUtils
-from armi.operators import settingsValidation
from armi.operators.runTypes import RunTypes
from armi.physics.fuelCycle.settings import CONF_SHUFFLE_LOGIC
from armi.physics.neutronics.globalFlux.globalFluxInterface import (
GlobalFluxInterfaceUsingExecuters,
)
+from armi.settings import settingsValidation
from armi.settings.fwSettings.globalSettings import (
CONF_TIGHT_COUPLING,
CONF_TIGHT_COUPLING_MAX_ITERS,
@@ -469,7 +469,7 @@ def _performTightCoupling(self, cycle: int, timeNode: int, writeDB: bool = True)
if writeDB:
# database has not yet been written, so we need to write it.
dbi = self.getInterface("database")
- dbi.writeDBEveryNode(cycle, timeNode)
+ dbi.writeDBEveryNode()
def _interactAll(self, interactionName, activeInterfaces, *args):
"""
@@ -487,9 +487,7 @@ def _interactAll(self, interactionName, activeInterfaces, *args):
halt = False
- cycleNodeTag = self._expandCycleAndTimeNodeArgs(
- *args, interactionName=interactionName
- )
+ cycleNodeTag = self._expandCycleAndTimeNodeArgs(interactionName)
runLog.header(
"=========== Triggering {} Event ===========".format(
interactionName + cycleNodeTag
@@ -497,9 +495,7 @@ def _interactAll(self, interactionName, activeInterfaces, *args):
)
for statePointIndex, interface in enumerate(activeInterfaces, start=1):
- self.printInterfaceSummary(
- interface, interactionName, statePointIndex, *args
- )
+ self.printInterfaceSummary(interface, interactionName, statePointIndex)
# maybe make this a context manager
if printMemUsage:
@@ -528,6 +524,9 @@ def _interactAll(self, interactionName, activeInterfaces, *args):
)
)
+ # Allow inherited classes to clean up things after an interaction
+ self._finalizeInteract()
+
runLog.header(
"=========== Completed {} Event ===========\n".format(
interactionName + cycleNodeTag
@@ -536,41 +535,53 @@ def _interactAll(self, interactionName, activeInterfaces, *args):
return halt
- def printInterfaceSummary(self, interface, interactionName, statePointIndex, *args):
+ def _finalizeInteract(self):
+ """Member called after each interface has completed its interaction.
+
+ Useful for cleaning up data.
+ """
+ pass
+
+ def printInterfaceSummary(self, interface, interactionName, statePointIndex):
"""
Log which interaction point is about to be executed.
This looks better as multiple lines but it's a lot easier to grep as one line.
We leverage newlines instead of long banners to save disk space.
"""
- nodeInfo = self._expandCycleAndTimeNodeArgs(
- *args, interactionName=interactionName
- )
+ nodeInfo = self._expandCycleAndTimeNodeArgs(interactionName)
line = "=========== {:02d} - {:30s} {:15s} ===========".format(
statePointIndex, interface.name, interactionName + nodeInfo
)
runLog.header(line)
- @staticmethod
- def _expandCycleAndTimeNodeArgs(*args, interactionName):
+ def _expandCycleAndTimeNodeArgs(self, interactionName):
"""Return text annotating information for current run event.
Notes
-----
- Init, BOL, EOL: empty
- - Everynode: (cycle, time node)
- - BOC: cycle number
- - Coupling: iteration number
+ - Everynode: cycle, time node
+ - BOC, EOC: cycle number
+ - Coupled: cycle, time node, iteration number
"""
- cycleNodeInfo = ""
- if args:
- if len(args) == 1:
- if interactionName == "Coupled":
- cycleNodeInfo = f" - iteration {args[0]}"
- elif interactionName in ("BOC", "EOC"):
- cycleNodeInfo = f" - cycle {args[0]}"
- else:
- cycleNodeInfo = f" - cycle {args[0]}, node {args[1]}"
+ if interactionName == "Coupled":
+ cycleNodeInfo = (
+ f" - timestep: cycle {self.r.p.cycle}, node {self.r.p.timeNode}, "
+ f"year {'{0:.2f}'.format(self.r.p.time)} - iteration "
+ f"{self.r.core.p.coupledIteration}"
+ )
+ elif interactionName in ("BOC", "EOC"):
+ cycleNodeInfo = f" - timestep: cycle {self.r.p.cycle}"
+ # e.g. " - timestep: cycle 2"
+ elif interactionName in ("Init", "BOL", "EOL"):
+ cycleNodeInfo = ""
+ else:
+ cycleNodeInfo = (
+ f" - timestep: cycle {self.r.p.cycle}, node {self.r.p.timeNode}, "
+ f"year {'{0:.2f}'.format(self.r.p.time)}"
+ )
+
return cycleNodeInfo
def _debugDB(self, interactionName, interfaceName, statePointIndex=0):
@@ -647,7 +658,7 @@ def interactAllEOC(self, cycle, excludedInterfaceNames=()):
activeInterfaces = self.getActiveInterfaces("EOC", excludedInterfaceNames)
self._interactAll("EOC", activeInterfaces, cycle)
- def interactAllEOL(self):
+ def interactAllEOL(self, excludedInterfaceNames=()):
"""
Run interactEOL for all enabled interfaces.
@@ -658,7 +669,7 @@ def interactAllEOL(self):
order. This allows, for example, an interface that must run
first to also run last.
"""
- activeInterfaces = self.getActiveInterfaces("EOL")
+ activeInterfaces = self.getActiveInterfaces("EOL", excludedInterfaceNames)
self._interactAll("EOL", activeInterfaces)
def interactAllCoupled(self, coupledIteration):
@@ -1017,7 +1028,7 @@ def getActiveInterfaces(
# Ensure the name of the interface isn't in some exclusion list.
nameCheck = lambda i: True
- if interactState == "EveryNode" or interactState == "EOC":
+ if interactState in ("EveryNode", "EOC", "EOL"):
nameCheck = lambda i: i.name not in excludedInterfaceNames
elif interactState == "BOC" and cycle < self.cs[CONF_DEFERRED_INTERFACES_CYCLE]:
nameCheck = lambda i: i.name not in self.cs[CONF_DEFERRED_INTERFACE_NAMES]
@@ -1067,6 +1078,8 @@ def detach(self):
"""
if self.r:
self.r.o = None
+ for comp in self.r:
+ comp.parent = None
self.r = None
for i in self.interfaces:
i.o = None
@@ -1243,16 +1256,6 @@ def snapshotRequest(self, cycle, node, iteration=None):
pathTools.copyOrWarn(
"Loading definition for snapshot", self.cs[CONF_LOADING_FILE], newFolder
)
- pathTools.copyOrWarn(
- "Flow history for snapshot",
- self.cs.caseTitle + ".flow_history.txt",
- newFolder,
- )
- pathTools.copyOrWarn(
- "Pressure history for snapshot",
- self.cs.caseTitle + ".pressure_history.txt",
- newFolder,
- )
@staticmethod
def setStateToDefault(cs):
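
The reworked ``_expandCycleAndTimeNodeArgs`` derives its annotation entirely from reactor state instead of positional args; a minimal re-implementation of the branch logic with plain arguments (all values hypothetical):

def expandCycleAndTimeNodeArgs(interactionName, cycle=2, node=1, year=3.5, iteration=0):
    if interactionName == "Coupled":
        return f" - timestep: cycle {cycle}, node {node}, year {year:.2f} - iteration {iteration}"
    elif interactionName in ("BOC", "EOC"):
        return f" - timestep: cycle {cycle}"
    elif interactionName in ("Init", "BOL", "EOL"):
        return ""
    return f" - timestep: cycle {cycle}, node {node}, year {year:.2f}"

assert expandCycleAndTimeNodeArgs("BOC") == " - timestep: cycle 2"
assert expandCycleAndTimeNodeArgs("EveryNode").endswith("year 3.50")
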
diff --git a/armi/operators/operatorMPI.py b/armi/operators/operatorMPI.py
index 191ae7d4d..bb9761653 100644
--- a/armi/operators/operatorMPI.py
+++ b/armi/operators/operatorMPI.py
@@ -163,6 +163,8 @@ def workerOperate(self):
note = context.MPI_COMM.bcast("wait", root=0)
if note != "wait":
raise RuntimeError('did not get "wait". Got {0}'.format(note))
+ elif cmd == "reset":
+ runLog.extra("Workers are being reset.")
else:
# we don't understand the command on our own. check the interfaces
# this allows all interfaces to have their own custom operation code.
@@ -193,7 +195,7 @@ def workerOperate(self):
pm = getPluginManager()
resetFlags = pm.hook.mpiActionRequiresReset(cmd=cmd)
# only reset if all the plugins agree to reset
- if all(resetFlags):
+ if all(resetFlags) or cmd == "reset":
self._resetWorker()
# might be an mpi action which has a reactor and everything, preventing
@@ -201,6 +203,21 @@ def workerOperate(self):
del cmd
gc.collect()
+ def _finalizeInteract(self):
+ """Inherited member called after each interface has completed its interact.
+
+ This will force all the workers to clear their reactor data so that it
+ isn't carried around to the next interaction.
+
+ Notes
+ -----
+ This is only called on the root processor. Worker processors will know
+ what to do with the "reset" broadcast.
+ """
+ if context.MPI_SIZE > 1:
+ context.MPI_COMM.bcast("reset", root=0)
+ runLog.extra("Workers have been reset.")
+
def _resetWorker(self):
"""
Clear out the reactor on the workers to start anew.
@@ -214,17 +231,23 @@ def _resetWorker(self):
.. warning:: This should build empty non-core systems too.
"""
- xsGroups = self.getInterface("xsGroups")
- if xsGroups:
- xsGroups.clearRepresentativeBlocks()
+ # Nothing to do if we never had anything
+ if self.r is None:
+ return
+
cs = self.cs
bp = self.r.blueprints
spatialGrid = self.r.core.spatialGrid
+ spatialGrid.armiObject = None
+ xsGroups = self.getInterface("xsGroups")
+ if xsGroups:
+ xsGroups.clearRepresentativeBlocks()
self.detach()
self.r = reactors.Reactor(cs.caseTitle, bp)
core = reactors.Core("Core")
self.r.add(core)
core.spatialGrid = spatialGrid
+ core.spatialGrid.armiObject = core
self.reattach(self.r, cs)
@staticmethod
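
The new ``reset`` handshake above hinges on a single broadcast string: the root calls ``bcast("reset")`` in ``_finalizeInteract`` and each worker matches it in its command loop. A schematic with a single-process stand-in for the communicator (illustration only; the real code uses ``context.MPI_COMM`` from mpi4py):

class FakeComm:
    """Single-process stand-in for an MPI communicator (illustration only)."""

    def bcast(self, obj, root=0):
        return obj  # every rank "receives" what root sent

comm = FakeComm()
cmd = comm.bcast("reset", root=0)  # root tells workers to drop reactor state
if cmd == "reset":
    # A worker would log this and later call _resetWorker() to rebuild an empty reactor.
    print("Workers are being reset.")
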
diff --git a/armi/operators/settingsValidation.py b/armi/operators/settingsValidation.py
index 6680b01b0..949b0e09b 100644
--- a/armi/operators/settingsValidation.py
+++ b/armi/operators/settingsValidation.py
@@ -1,4 +1,4 @@
-# Copyright 2019 TerraPower, LLC
+# Copyright 2024 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,795 +11,19 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+"""This is a placeholder file that only exists to provide backwards compatibility.
-"""
-A system to check user settings for validity and provide users with meaningful
-suggestions to fix.
+Notes
+-----
+The actual ``settingsValidation.py`` module has been moved to ``armi/settings/``. For now, this
+file will provide backwards compatibility.
-This allows developers to define a rich set of rules and suggestions for user settings.
-These then pop up during initialization of a run, either on the command line or as
-dialogues in the GUI. They say things like: "Your ___ setting has the value ___, which
-is impossible. Would you like to switch to ___?"
+Warning
+-------
+DeprecationWarning: This file will disappear in early 2025.
"""
-import itertools
-import os
-import re
-import shutil
-
-from armi import context
-from armi import getPluginManagerOrFail
-from armi import runLog
-from armi.physics import neutronics
-from armi.reactor import geometry
-from armi.reactor import systemLayoutInput
-from armi.settings.settingsIO import (
- prompt,
- RunLogPromptCancel,
- RunLogPromptUnresolvable,
-)
-from armi.utils import directoryChangers
-from armi.utils import pathTools
-from armi.utils.mathematics import expandRepeatedFloats
-
-
-class Query:
- """
- An individual setting validator.
-
- .. impl:: Rules to validate and customize a setting's behavior.
- :id: I_ARMI_SETTINGS_RULES
- :implements: R_ARMI_SETTINGS_RULES
-
- This class is meant to represent a generic validation test against a setting.
- The goal is: developers create new settings and they want to make sure those
- settings are used correctly. As an implementation, users pass in a
- ``condition`` function to this class that returns ``True`` or ``False`` based
- on the setting name and value. And then this class has a ``resolve`` method
- which tests if the condition is met. Optionally, this class also contains a
- ``correction`` function that allows users to automatically correct a bad
- setting, if the developers can find a clear path forward.
- """
-
- def __init__(self, condition, statement, question, correction):
- """
- Construct a query.
-
- Parameters
- ----------
- condition : callable
- A callable that returns True or False. If True, then the query activates
- its question and potential correction.
- statement : str
- A statement of the problem indicated by a True condition
- question : str
- A question asking the user for confirmation of the proposed
- fix.
- correction : callable
- A callable that when called fixes the situation. See
- :py:meth:`Inspector.NO_ACTION` for no-ops.
- """
- self.condition = condition
- self.statement = statement
- self.question = question
- self.correction = correction
- # True if the query is `passed` and does not result in an immediate failure
- self.corrected = False
- self._passed = False
- self.autoResolved = True
-
- def __repr__(self):
- # Add representation so that it's possible to identify which one
- # is being referred to when there are errors.
- return "".format(self.statement)
-
- def __bool__(self):
- try:
- return bool(self.condition())
- except TypeError:
- runLog.error(
- f"Invalid setting validation query. Update validator for: {self})"
- )
- raise
-
- __nonzero__ = __bool__ # Python2 compatibility
-
- def isCorrective(self):
- return self.correction is not Inspector.NO_ACTION
-
- def resolve(self):
- """Standard i/o prompt for resolution of an individual query."""
- if context.MPI_RANK != 0:
- return
-
- if self.condition():
- try:
- if self.isCorrective():
- try:
- make_correction = prompt(
- "INSPECTOR: " + self.statement,
- self.question,
- "YES_NO",
- "NO_DEFAULT",
- "CANCEL",
- )
- if make_correction:
- self.correction()
- self.corrected = True
- self._passed = True
- except RunLogPromptCancel as ki:
- raise KeyboardInterrupt from ki
- else:
- try:
- continue_submission = prompt(
- "INSPECTOR: " + self.statement,
- "Continue?",
- "YES_NO",
- "NO_DEFAULT",
- "CANCEL",
- )
- if not continue_submission:
- raise KeyboardInterrupt
- except RunLogPromptCancel as ki:
- raise KeyboardInterrupt from ki
- except RunLogPromptUnresolvable:
- self.autoResolved = False
- self._passed = True
-
-
-class Inspector:
- """
- This manages queries which assert certain states of the data model, generally presenting
- themselves to the user, offering information on the potential problem, a question
- and the action to take on an affirmative and negative answer from the user.
-
- In practice very useful for making sure setting values are as intended and without
- bad interplay with one another.
-
- One Inspector will contain multiple Queries and be associated directly with an
- :py:class:`~armi.operators.operator.Operator`.
- """
-
- @staticmethod
- def NO_ACTION():
- """Convenience callable used to generate Queries that can't be easily auto-resolved."""
- return None
-
- def __init__(self, cs):
- """
- Construct an inspector.
-
- Parameters
- ----------
- cs : Settings
- """
- self.queries = []
- self.cs = cs
- self.geomType = None
- self.coreSymmetry = None
- self._inspectBlueprints()
- self._setGeomType()
- self._inspectSettings()
-
- # Gather and attach validators from all plugins
- # This runs on all registered plugins, not just active ones.
- pluginQueries = getPluginManagerOrFail().hook.defineSettingsValidators(
- inspector=self
- )
- for queries in pluginQueries:
- self.queries.extend(queries)
-
- def run(self, cs=None):
- """
- Run through each query and deal with it if possible.
-
- Returns
- -------
- correctionsMade : bool
- Whether or not anything was updated.
-
- Raises
- ------
- RuntimeError
- When a programming error causes queries to loop.
- """
- if context.MPI_RANK != 0:
- return False
-
- # the following attribute changes will alter what the queries investigate when resolved
- correctionsMade = False
- self.cs = cs or self.cs
- runLog.debug("{} executing queries.".format(self.__class__.__name__))
- if not any(self.queries):
- runLog.debug(
- "{} found no problems with the current state.".format(
- self.__class__.__name__
- )
- )
- else:
- for query in self.queries:
- query.resolve()
- if query.corrected:
- correctionsMade = True
- issues = [
- query
- for query in self.queries
- if query and (query.isCorrective() and not query._passed)
- ]
- if any(issues):
- # something isn't resolved or was unresolved by changes
- raise RuntimeError(
- "The input inspection did not resolve all queries, "
- "some issues are creating cyclic resolutions: {}".format(issues)
- )
- runLog.debug("{} has finished querying.".format(self.__class__.__name__))
-
- if correctionsMade:
- # find unused file path to store original settings as to avoid overwrite
- strSkeleton = "{}_old".format(self.cs.path.split(".yaml")[0])
- for num in itertools.count():
- if num == 0:
- renamePath = f"{strSkeleton}.yaml"
- else:
- renamePath = f"{strSkeleton}{num}.yaml"
- if not self._csRelativePathExists(renamePath):
- break
- # preserve old file before saving settings file
- runLog.important(
- f"Preserving original settings file by renaming `{renamePath}`"
- )
- shutil.copy(self.cs.path, renamePath)
- # save settings file
- self.cs.writeToYamlFile(self.cs.path)
-
- return correctionsMade
-
- def addQuery(self, condition, statement, question, correction):
- """Convenience method, query must be resolved, else run fails."""
- if not callable(correction):
- raise ValueError(
- 'Query for "{}" malformed. Expecting callable.'.format(statement)
- )
- self.queries.append(Query(condition, statement, question, correction))
-
- def addQueryBadLocationWillLikelyFail(self, settingName):
- """Add a query indicating the current path for ``settingName`` does not exist and will likely fail."""
- self.addQuery(
- lambda: not os.path.exists(pathTools.armiAbsPath(self.cs[settingName])),
- "Setting {} points to nonexistent location\n{}\nFailure extremely likely".format(
- settingName, self.cs[settingName]
- ),
- "",
- self.NO_ACTION,
- )
-
- def addQueryCurrentSettingMayNotSupportFeatures(self, settingName):
- """Add a query that the current value for ``settingName`` may not support certain features."""
- self.addQuery(
- lambda: self.cs[settingName] != self.cs.getSetting(settingName).default,
- "{} set as:\n{}\nUsing this location instead of the default location\n{}\n"
- "may not support certain functions.".format(
- settingName,
- self.cs[settingName],
- self.cs.getSetting(settingName).default,
- ),
- "Revert to default location?",
- lambda: self._assignCS(
- settingName, self.cs.getSetting(settingName).default
- ),
- )
-
- def _assignCS(self, key, value):
- """Lambda assignment workaround."""
- # this type of assignment works, but be mindful of
- # scoping when trying different methods
- runLog.extra(f"Updating setting `{key}` to `{value}`")
- self.cs[key] = value
-
- def _raise(self):
- raise KeyboardInterrupt("Input inspection has been interrupted.")
-
- def _inspectBlueprints(self):
- """Blueprints early error detection and old format conversions."""
- from armi.physics.neutronics.settings import CONF_LOADING_FILE
-
- # if there is a blueprints object, we don't need to check for a file
- if self.cs.filelessBP:
- return
-
- self.addQuery(
- lambda: not self.cs[CONF_LOADING_FILE],
- "No blueprints file loaded. Run will probably fail.",
- "",
- self.NO_ACTION,
- )
-
- self.addQuery(
- lambda: not self._csRelativePathExists(self.cs[CONF_LOADING_FILE]),
- "Blueprints file {} not found. Run will fail.".format(
- self.cs[CONF_LOADING_FILE]
- ),
- "",
- self.NO_ACTION,
- )
-
- def _csRelativePathExists(self, filename):
- csRelativePath = self._csRelativePath(filename)
- return os.path.exists(csRelativePath) and os.path.isfile(csRelativePath)
-
- def _csRelativePath(self, filename):
- return os.path.join(self.cs.inputDirectory, filename)
-
- def _setGeomType(self):
- if self.cs["geomFile"]:
- with directoryChangers.DirectoryChanger(
- self.cs.inputDirectory, dumpOnException=False
- ):
- geom = systemLayoutInput.SystemLayoutInput()
- geom.readGeomFromFile(self.cs["geomFile"])
-
- self.geomType, self.coreSymmetry = geom.geomType, geom.symmetry
-
- def _correctCyclesToZeroBurnup(self):
- self._assignCS("nCycles", 1)
- self._assignCS("burnSteps", 0)
- self._assignCS("cycleLength", None)
- self._assignCS("cycleLengths", None)
- self._assignCS("availabilityFactor", None)
- self._assignCS("availabilityFactors", None)
- self._assignCS("cycles", [])
-
- def _checkForBothSimpleAndDetailedCyclesInputs(self):
- """
- Because the only way to check if a setting has been "entered" is to check
- against the default, if the user specifies all the simple cycle settings
- _exactly_ as the defaults, this won't be caught. But, it would be very
- coincidental for the user to _specify_ all the default values when
- performing any real analysis.
-
- Also, we must bypass the `Settings` getter and reach directly
- into the underlying `__settings` dict to avoid triggering an error
- at this stage in the run. Otherwise an error will inherently be raised
- if the detailed cycles input is used because the simple cycles inputs
- have defaults. We don't care that those defaults are there, we only
- have a problem with those defaults being _used_, which will be caught
- later on.
- """
- bothCyclesInputTypesPresent = (
- self.cs._Settings__settings["cycleLength"].value
- != self.cs._Settings__settings["cycleLength"].default
- or self.cs._Settings__settings["cycleLengths"].value
- != self.cs._Settings__settings["cycleLengths"].default
- or self.cs._Settings__settings["burnSteps"].value
- != self.cs._Settings__settings["burnSteps"].default
- or self.cs._Settings__settings["availabilityFactor"].value
- != self.cs._Settings__settings["availabilityFactor"].default
- or self.cs._Settings__settings["availabilityFactors"].value
- != self.cs._Settings__settings["availabilityFactors"].default
- or self.cs._Settings__settings["powerFractions"].value
- != self.cs._Settings__settings["powerFractions"].default
- ) and self.cs["cycles"] != []
-
- return bothCyclesInputTypesPresent
-
- def _inspectSettings(self):
- """Check settings for inconsistencies."""
- from armi import operators
- from armi.physics.neutronics.settings import (
- CONF_BC_COEFFICIENT,
- CONF_BOUNDARIES,
- CONF_XS_KERNEL,
- CONF_XS_SCATTERING_ORDER,
- )
-
- self.addQueryBadLocationWillLikelyFail("operatorLocation")
-
- self.addQuery(
- lambda: self.cs["outputFileExtension"] == "pdf" and self.cs["genReports"],
- "Output files of '.pdf' format are not supported by the reporting HTML generator. '.pdf' "
- "images will not be included.",
- "Switch to '.png'?",
- lambda: self._assignCS("outputFileExtension", "png"),
- )
-
- self.addQuery(
- lambda: (
- (
- self.cs["beta"]
- and isinstance(self.cs["beta"], list)
- and not self.cs["decayConstants"]
- )
- or (self.cs["decayConstants"] and not self.cs["beta"])
- ),
- "Both beta components and decay constants should be provided if either are "
- "being supplied.",
- "",
- self.NO_ACTION,
- ),
-
- self.addQuery(
- lambda: self.cs["skipCycles"] > 0 and not self.cs["reloadDBName"],
- "You have chosen to do a restart case without specifying a database to load from. "
- "Run will load from output files, if they exist but burnup, etc. will not be updated.",
- "",
- self.NO_ACTION,
- )
-
- self.addQuery(
- lambda: self.cs["runType"] != operators.RunTypes.SNAPSHOTS
- and self.cs["loadStyle"] == "fromDB"
- and self.cs["startCycle"] == 0
- and self.cs["startNode"] == 0,
- "Starting from cycle 0, and time node 0 was chosen. Restart runs load from "
- "the time node just before the restart. There is no time node to load from "
- "before cycle 0 node 0. Either switch to the snapshot operator, start from "
- "a different time step or load from inputs rather than database as "
- "`loadStyle`.",
- "",
- self.NO_ACTION,
- )
-
- self.addQuery(
- lambda: self.cs["runType"] == operators.RunTypes.SNAPSHOTS
- and not (self.cs["dumpSnapshot"] or self.cs["defaultSnapshots"]),
- "The Snapshots operator was specified, but no dump snapshots were chosen."
- "Please specify snapshot steps with the `dumpSnapshot` setting.",
- "",
- self.NO_ACTION,
- )
-
- self.addQuery(
- lambda: self.cs.caseTitle.lower()
- == os.path.splitext(os.path.basename(self.cs["reloadDBName"].lower()))[0],
- "Snapshot DB ({0}) and main DB ({1}) cannot have the same name."
- "Change name of settings file and resubmit.".format(
- self.cs["reloadDBName"], self.cs.caseTitle
- ),
- "",
- self.NO_ACTION,
- )
-
- self.addQuery(
- lambda: self.cs["reloadDBName"] != ""
- and not os.path.exists(self.cs["reloadDBName"]),
- "Reload database {} does not exist. \nPlease point to an existing DB, "
- "or set to empty and load from input.".format(self.cs["reloadDBName"]),
- "",
- self.NO_ACTION,
- )
-
- def _willBeCopiedFrom(fName):
- return any(
- fName == os.path.split(copyFile)[1]
- for copyFile in self.cs["copyFilesFrom"]
- )
-
- self.addQuery(
- lambda: self.cs["explicitRepeatShuffles"]
- and not self._csRelativePathExists(self.cs["explicitRepeatShuffles"])
- and not _willBeCopiedFrom(self.cs["explicitRepeatShuffles"]),
- "The specified repeat shuffle file `{0}` does not exist, and won't be copied. "
- "Run will crash.".format(self.cs["explicitRepeatShuffles"]),
- "",
- self.NO_ACTION,
- )
-
- self.addQuery(
- lambda: not self.cs["power"] and not self.cs["powerDensity"],
- "No power or powerDensity set. You must always start by importing a base settings file.",
- "",
- self.NO_ACTION,
- )
-
- self.addQuery(
- lambda: self.cs["power"] > 0 and self.cs["powerDensity"] > 0,
- "The power and powerDensity are both set, please note the power will be used as the truth.",
- "",
- self.NO_ACTION,
- )
-
- # The gamma cross sections generated for MC2-3 by ANL were done with NJOY with
- # P3 scattering. MC2-3 would have to be modified and the gamma cross sections
- # re-generated with NJOY for MC2-3 to allow any other scattering order with
- # gamma cross sections enabled.
- self.addQuery(
- lambda: (
- "MC2v3" in self.cs[CONF_XS_KERNEL]
- and neutronics.gammaXsAreRequested(self.cs)
- and self.cs[CONF_XS_SCATTERING_ORDER] != 3
- ),
- "MC2-3 will crash if a scattering order is not set to 3 when generating gamma XS.",
- f"Would you like to set the `{CONF_XS_SCATTERING_ORDER}` to 3?",
- lambda: self._assignCS(CONF_XS_SCATTERING_ORDER, 3),
- )
-
- self.addQuery(
- lambda: self.cs["outputCacheLocation"]
- and not os.path.exists(self.cs["outputCacheLocation"]),
- "`outputCacheLocation` path {} does not exist. Please specify a location that exists.".format(
- self.cs["outputCacheLocation"]
- ),
- "",
- self.NO_ACTION,
- )
-
- self.addQuery(
- lambda: (
- not self.cs["tightCoupling"]
- and self.cs["tightCouplingMaxNumIters"] != 4
- ),
- "You've requested a non default number of tight coupling iterations but left tightCoupling: False."
- "Do you want to set tightCoupling to True?",
- "",
- lambda: self._assignCS("tightCoupling", True),
- )
-
- self.addQuery(
- lambda: (not self.cs["tightCoupling"] and self.cs["tightCouplingSettings"]),
- "You've requested non default tight coupling settings but tightCoupling: False."
- "Do you want to set tightCoupling to True?",
- "",
- lambda: self._assignCS("tightCoupling", True),
- )
-
- self.addQuery(
- lambda: self.cs["startCycle"]
- and self.cs["nCycles"] < self.cs["startCycle"],
- "nCycles must be greater than or equal to startCycle in restart cases. nCycles"
- " is the _total_ number of cycles in the completed run (i.e. restarted +"
- " continued cycles). Please update the case settings.",
- "",
- self.NO_ACTION,
- )
-
- self.addQuery(
- lambda: self.cs["nCycles"] in [0, None],
- "Cannot run 0 cycles. Set burnSteps to 0 to activate a single time-independent case.",
- "Set 1 cycle and 0 burnSteps for single time-independent case?",
- self._correctCyclesToZeroBurnup,
- )
-
- self.addQuery(
- self._checkForBothSimpleAndDetailedCyclesInputs,
- "If specifying detailed cycle history with `cycles`, you may not"
- " also use any of the simple cycle history inputs `cycleLength(s)`,"
- " `burnSteps`, `availabilityFactor(s)`, or `powerFractions`."
- " Using the detailed cycle history.",
- "",
- self.NO_ACTION,
- )
-
- def _factorsAreValid(factors, maxVal=1.0):
- try:
- expandedList = expandRepeatedFloats(factors)
- except (ValueError, IndexError):
- return False
- return (
- all(0.0 <= val <= maxVal for val in expandedList)
- and len(expandedList) == self.cs["nCycles"]
- )
-
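The helper above accepts a factor list only when every expanded entry is in bounds and the expanded length equals ``nCycles``. A standalone sketch with a trivial stand-in for ``expandRepeatedFloats`` (the real utility also expands repeat shorthand):

    def factorsAreValid(factors, nCycles, maxVal=1.0):
        expanded = [float(f) for f in factors]  # stand-in for expandRepeatedFloats
        return all(0.0 <= v <= maxVal for v in expanded) and len(expanded) == nCycles

    print(factorsAreValid([0.9, 1.0, 0.8], nCycles=3))  # True
    print(factorsAreValid([0.9, 1.2], nCycles=2))       # False: 1.2 exceeds maxVal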
- if self.cs["cycles"] == []:
- self.addQuery(
- lambda: (
- self.cs["availabilityFactors"]
- and not _factorsAreValid(self.cs["availabilityFactors"])
- ),
- "`availabilityFactors` was not set to a list compatible with the number of cycles. "
- "Please update input or use constant duration.",
- "Use constant availability factor specified in `availabilityFactor` setting?",
- lambda: self._assignCS("availabilityFactors", []),
- )
-
- self.addQuery(
- lambda: (
- self.cs["powerFractions"]
- and not _factorsAreValid(self.cs["powerFractions"])
- ),
- "`powerFractions` was not set to a compatible list. "
- "Please update input or use full power at all cycles.",
- "Use full power for all cycles?",
- lambda: self._assignCS("powerFractions", []),
- )
-
- self.addQuery(
- lambda: (
- self.cs["cycleLengths"]
- and not _factorsAreValid(self.cs["cycleLengths"], maxVal=1e10)
- ),
- "`cycleLengths` was not set to a list compatible with the number of cycles."
- " Please update input or use constant duration.",
- "Use constant cycle length specified in `cycleLength` setting?",
- lambda: self._assignCS("cycleLengths", []),
- )
-
- self.addQuery(
- lambda: (
- self.cs["runType"] == operators.RunTypes.STANDARD
- and self.cs["burnSteps"] == 0
- and (
- (
- len(self.cs["cycleLengths"]) > 1
- if self.cs["cycleLengths"] is not None
- else False
- )
- or self.cs["nCycles"] > 1
- )
- ),
- "Cannot run multi-cycle standard cases with 0 burnSteps per cycle. Please update settings.",
- "",
- self.NO_ACTION,
- )
-
- def decayCyclesHaveInputThatWillBeIgnored():
- """Check if there is any decay-related input that will be ignored."""
- try:
- powerFracs = expandRepeatedFloats(self.cs["powerFractions"])
- availabilities = expandRepeatedFloats(
- self.cs["availabilityFactors"]
- ) or ([self.cs["availabilityFactor"]] * self.cs["nCycles"])
- except: # noqa: bare-except
- return True
-
- # This will be a full decay step and any power fraction will be ignored. May be ok.
- return any(
- pf > 0.0 and af == 0.0 for pf, af in zip(powerFracs, availabilities)
- )
-
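The flagged condition is a cycle with a nonzero power fraction but zero availability, which the framework treats as a pure decay step. A worked illustration of the zip check:

    powerFracs = [1.0, 0.5, 0.0]
    availabilities = [1.0, 0.0, 1.0]
    # the second cycle has power (0.5) but zero availability, so it is flagged
    flagged = any(pf > 0.0 and af == 0.0 for pf, af in zip(powerFracs, availabilities))
    print(flagged)  # True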
- self.addQuery(
- lambda: (
- self.cs["cycleLengths"]
- and self.cs["powerFractions"]
- and decayCyclesHaveInputThatWillBeIgnored()
- and not self.cs["cycles"]
- ),
- "At least one cycle has a non-zero power fraction but an availability of zero. Please "
- "update the input.",
- "",
- self.NO_ACTION,
- )
-
- self.addQuery(
- lambda: self.cs["operatorLocation"]
- and self.cs["runType"] != operators.RunTypes.STANDARD,
- "The `runType` setting is set to `{0}` but there is a `custom operator location` defined".format(
- self.cs["runType"]
- ),
- "",
- self.NO_ACTION,
- )
-
- self.addQuery(
- lambda: self.cs["skipCycles"] > 0
- and not os.path.exists(self.cs.caseTitle + ".restart.dat"),
- "This is a restart case, but the required restart file {0}.restart.dat is not found".format(
- self.cs.caseTitle
- ),
- "",
- self.NO_ACTION,
- )
-
- self.addQuery(
- lambda: self.cs["deferredInterfacesCycle"] > self.cs["nCycles"],
- "The deferred interface activation cycle exceeds set cycle occurrence. "
- "Interfaces will not be activated in this run!",
- "",
- self.NO_ACTION,
- )
-
- self.addQuery(
- lambda: (
- self.cs[CONF_BOUNDARIES] != neutronics.GENERAL_BC
- and self.cs[CONF_BC_COEFFICIENT]
- ),
- f"General neutronic boundary condition was not selected, but `{CONF_BC_COEFFICIENT}` was defined. "
- f"Please enable `Generalized` neutronic boundary condition or disable `{CONF_BC_COEFFICIENT}`.",
- "",
- self.NO_ACTION,
- )
-
- self.addQuery(
- lambda: self.cs["geomFile"]
- and str(self.geomType) not in geometry.VALID_GEOMETRY_TYPE,
- "{} is not a valid geometry Please update geom type on the geom file. "
- "Valid (case insensitive) geom types are: {}".format(
- self.geomType, geometry.VALID_GEOMETRY_TYPE
- ),
- "",
- self.NO_ACTION,
- )
-
- self.addQuery(
- lambda: self.cs["geomFile"]
- and not geometry.checkValidGeomSymmetryCombo(
- self.geomType, self.coreSymmetry
- ),
- "{}, {} is not a valid geometry and symmetry combination. Please update "
- "either geometry or symmetry on the geom file.".format(
- str(self.geomType), str(self.coreSymmetry)
- ),
- "",
- self.NO_ACTION,
- )
-
-
-def createQueryRevertBadPathToDefault(inspector, settingName, initialLambda=None):
- """
- Return a query to revert a bad path to its default.
-
- Parameters
- ----------
- inspector: Inspector
-        the inspector whose settings are being queried
- settingName: str
- name of the setting to inspect
- initialLambda: None or callable function
-        If ``None``, the callable argument for :py:meth:`addQuery` checks whether the setting's path exists.
- If more complicated callable arguments are needed, they can be passed in as the ``initialLambda`` setting.
- """
- if initialLambda is None:
- initialLambda = lambda: (
- not os.path.exists(pathTools.armiAbsPath(inspector.cs[settingName]))
- and inspector.cs.getSetting(settingName).offDefault
- ) # solution is to revert to default
-
- query = Query(
- initialLambda,
- "Setting {} points to a nonexistent location:\n{}".format(
- settingName, inspector.cs[settingName]
- ),
- "Revert to default location?",
- inspector.cs.getSetting(settingName).revertToDefault,
- )
- return query
-
-
-def validateVersion(versionThis: str, versionRequired: str) -> bool:
- """Helper function to allow users to verify that their version matches the settings file.
-
- Parameters
- ----------
- versionThis: str
- The version of this ARMI, App, or Plugin.
- This MUST be in the form: 1.2.3
- versionRequired: str
- The version to compare against, say in a Settings file.
- This must be in one of the forms: 1.2.3, 1.2, or 1
-
- Returns
- -------
- bool
- Does this version match the version in the Settings file/object?
- """
- fullV = "\d+\.\d+\.\d+"
- medV = "\d+\.\d+"
- minV = "\d+"
+# ruff: noqa: F401
- if versionRequired == "uncontrolled":
- # This default flag means we don't want to check the version.
- return True
- elif re.search(fullV, versionThis) is None:
- raise ValueError(
- "The input version ({0}) does not match the required format: {1}".format(
- versionThis, fullV
- )
- )
- elif re.search(fullV, versionRequired) is not None:
- return versionThis == versionRequired
- elif re.search(medV, versionRequired) is not None:
- return ".".join(versionThis.split(".")[:2]) == versionRequired
- elif re.search(minV, versionRequired) is not None:
- return versionThis.split(".")[0] == versionRequired
- else:
- raise ValueError(
- "The required version is not a valid format: {}".format(versionRequired)
- )
+from armi.settings.settingsValidation import createQueryRevertBadPathToDefault
+from armi.settings.settingsValidation import Inspector
+from armi.settings.settingsValidation import Query
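For reference, the relocated ``validateVersion`` matches a full ``major.minor.patch`` version against a required version of one, two, or three fields. A hypothetical condensed re-implementation of that comparison rule (ignoring the format validation the real function performs):

    def versionsMatch(versionThis: str, versionRequired: str) -> bool:
        """Compare only as many version fields as the requirement specifies."""
        if versionRequired == "uncontrolled":
            return True  # default flag: skip the check entirely
        nFields = len(versionRequired.split("."))
        return ".".join(versionThis.split(".")[:nFields]) == versionRequired

    assert versionsMatch("1.2.3", "1.2.3")
    assert versionsMatch("1.2.3", "1.2")
    assert not versionsMatch("1.2.3", "2")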
diff --git a/armi/operators/snapshots.py b/armi/operators/snapshots.py
index f5865462f..575c8b606 100644
--- a/armi/operators/snapshots.py
+++ b/armi/operators/snapshots.py
@@ -62,8 +62,8 @@ def _mainOperate(self):
# update the snapshot requests if the user chose to load from a specific cycle/node
dbi = self.getInterface("database")
-
- lastTimeStep = snapshots[-1]
+        # database is excluded since the snapshots operator writes the DB itself
+ excludeDB = ("database",)
for ssCycle, ssNode in snapshots:
runLog.important(
"Beginning snapshot ({0:02d}, {1:02d})".format(ssCycle, ssNode)
@@ -79,18 +79,17 @@ def _mainOperate(self):
if halt:
break
- # database is excluded since it writes at EOC
- self.interactAllEveryNode(
- ssCycle, ssNode, excludedInterfaceNames=("database",)
- )
+            # database is excluded since it writes after tight coupling finishes
+ self.interactAllEveryNode(ssCycle, ssNode, excludedInterfaceNames=excludeDB)
self._performTightCoupling(ssCycle, ssNode, writeDB=False)
+ # tight coupling is done, now write to DB
+ dbi.writeDBEveryNode()
- # database is excluded at last snapshot since it writes at EOL
- exclude = ("database",) if (ssCycle, ssNode) == lastTimeStep else ()
- self.interactAllEOC(self.r.p.cycle, excludedInterfaceNames=exclude)
+ self.interactAllEOC(self.r.p.cycle)
# run things that happen at EOL, like reports, plotters, etc.
- self.interactAllEOL()
+ self.interactAllEOL(excludedInterfaceNames=excludeDB)
+ dbi.closeDB() # dump the database to file
runLog.important("Done with ARMI snapshots case.")
@staticmethod
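The net effect of this hunk is a reordered per-snapshot sequence: every-node interactions run without the database interface, tight coupling converges, and only then is the node written out. A simplified sketch of the resulting control flow, using the names from the diff:

    def runSnapshots(o, dbi, snapshots):
        """Sketch of the reordered snapshot loop (simplified from the diff)."""
        excludeDB = ("database",)
        for ssCycle, ssNode in snapshots:
            o.interactAllEveryNode(ssCycle, ssNode, excludedInterfaceNames=excludeDB)
            o._performTightCoupling(ssCycle, ssNode, writeDB=False)
            dbi.writeDBEveryNode()  # write once, after tight coupling converges
        dbi.closeDB()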
diff --git a/armi/operators/tests/test_operatorSnapshots.py b/armi/operators/tests/test_operatorSnapshots.py
index 36d2f6d52..cb07aaab3 100644
--- a/armi/operators/tests/test_operatorSnapshots.py
+++ b/armi/operators/tests/test_operatorSnapshots.py
@@ -32,13 +32,18 @@ def setUp(self):
newSettings["branchVerbosity"] = "important"
newSettings["nCycles"] = 1
newSettings["dumpSnapshot"] = ["000000", "008000", "016005"]
- o1, self.r = test_reactors.loadTestReactor(customSettings=newSettings)
+ o1, self.r = test_reactors.loadTestReactor(
+ customSettings=newSettings,
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml",
+ )
self.o = OperatorSnapshots(o1.cs)
self.o.r = self.r
# mock a Database Interface
self.dbi = DatabaseInterface(self.r, o1.cs)
self.dbi.loadState = lambda c, n: None
+ self.dbi.writeDBEveryNode = lambda: None
+ self.dbi.closeDB = lambda: None
def test_atEOL(self):
self.assertFalse(self.o.atEOL)
@@ -58,7 +63,7 @@ def test_mainOperate(self):
self.assertEqual(self.r.core.p.power, 0.0)
self.o._mainOperate()
- self.assertEqual(self.r.core.p.power, 100000000.0)
+ self.assertEqual(self.r.core.p.power, 1000000.0)
def test_createInterfaces(self):
self.assertEqual(len(self.o.interfaces), 0)
diff --git a/armi/operators/tests/test_operators.py b/armi/operators/tests/test_operators.py
index 581a2a923..54199a5b9 100644
--- a/armi/operators/tests/test_operators.py
+++ b/armi/operators/tests/test_operators.py
@@ -62,7 +62,9 @@ class InterfaceC(Interface):
class OperatorTests(unittest.TestCase):
def setUp(self):
- self.o, self.r = test_reactors.loadTestReactor()
+ self.o, self.r = test_reactors.loadTestReactor(
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
self.activeInterfaces = [ii for ii in self.o.interfaces if ii.enabled()]
def test_operatorData(self):
@@ -169,7 +171,9 @@ def test_addInterfaceSubclassCollision(self):
self.assertEqual(self.o.getInterface("Third"), interfaceC)
def test_interfaceIsActive(self):
- self.o, _r = test_reactors.loadTestReactor()
+ self.o, _r = test_reactors.loadTestReactor(
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
self.assertTrue(self.o.interfaceIsActive("main"))
self.assertFalse(self.o.interfaceIsActive("Fake-o"))
@@ -203,15 +207,28 @@ def test_getActiveInterfaces(self):
self.assertIn("history", interfaceNames)
self.assertNotIn("xsGroups", interfaceNames)
- # Test EOL
- interfaces = self.o.getActiveInterfaces("EOL")
- self.assertEqual(interfaces[-1].name, "main")
-
# Test Coupled
interfaces = self.o.getActiveInterfaces("Coupled")
for test, ref in zip(interfaces, self.activeInterfaces):
self.assertEqual(test.name, ref.name)
+ # Test EOL
+ interfaces = self.o.getActiveInterfaces("EOL")
+ self.assertEqual(interfaces[-1].name, "main")
+
+ # Test excludedInterfaceNames
+ excludedInterfaceNames = ["fissionProducts", "fuelHandler", "xsGroups"]
+ interfaces = self.o.getActiveInterfaces(
+ "EOL", excludedInterfaceNames=excludedInterfaceNames
+ )
+ interfaceNames = [ii.name for ii in interfaces]
+ self.assertIn("history", interfaceNames)
+ self.assertIn("main", interfaceNames)
+ self.assertIn("snapshot", interfaceNames)
+ self.assertNotIn("fissionProducts", interfaceNames)
+ self.assertNotIn("fuelHandler", interfaceNames)
+ self.assertNotIn("xsGroups", interfaceNames)
+
def test_loadStateError(self):
"""The ``loadTestReactor()`` test tool does not have any history in the DB to load from."""
# a first, simple test that this method fails correctly
@@ -236,38 +253,20 @@ def test_snapshotRequest(self, fakeDirList, fakeCopy):
with mockRunLogs.BufferLog() as mock:
self.o.snapshotRequest(0, 1)
self.assertIn("ISOTXS-c0", mock.getStdout())
- self.assertIn(
- "DIF3D input for snapshot: armiRun-flux-c0n1.inp",
- mock.getStdout(),
- )
- self.assertIn(
- "DIF3D output for snapshot: armiRun-flux-c0n1.out",
- mock.getStdout(),
- )
+ self.assertIn("DIF3D input for snapshot", mock.getStdout())
self.assertIn("Shuffle logic for snapshot", mock.getStdout())
self.assertIn("Geometry file for snapshot", mock.getStdout())
self.assertIn("Loading definition for snapshot", mock.getStdout())
- self.assertIn("Flow history for snapshot", mock.getStdout())
- self.assertIn("Pressure history for snapshot", mock.getStdout())
self.assertTrue(os.path.exists("snapShot0_1"))
with TemporaryDirectoryChanger():
with mockRunLogs.BufferLog() as mock:
self.o.snapshotRequest(0, 2, iteration=1)
self.assertIn("ISOTXS-c0", mock.getStdout())
- self.assertIn(
- "DIF3D input for snapshot: armiRun-flux-c0n2i1.inp",
- mock.getStdout(),
- )
- self.assertIn(
- "DIF3D output for snapshot: armiRun-flux-c0n2i1.out",
- mock.getStdout(),
- )
+ self.assertIn("DIF3D input for snapshot", mock.getStdout())
self.assertIn("Shuffle logic for snapshot", mock.getStdout())
self.assertIn("Geometry file for snapshot", mock.getStdout())
self.assertIn("Loading definition for snapshot", mock.getStdout())
- self.assertIn("Flow history for snapshot", mock.getStdout())
- self.assertIn("Pressure history for snapshot", mock.getStdout())
self.assertTrue(os.path.exists("snapShot0_2"))
@@ -287,7 +286,7 @@ def test_createOperator(self):
# validate some more nitty-gritty operator details come from settings
burnStepsSetting = cs["burnSteps"]
- if type(burnStepsSetting) != list:
+ if type(burnStepsSetting) is not list:
burnStepsSetting = [burnStepsSetting]
self.assertEqual(o.burnSteps, burnStepsSetting)
self.assertEqual(o.maxBurnSteps, max(burnStepsSetting))
@@ -412,7 +411,8 @@ def test_computeTightCouplingConvergence(self):
Notes
-----
- - Assertion #1: ensure that the convergence of Keff, eps, is greater than 1e-5 (the prescribed convergence criteria)
+ - Assertion #1: ensure that the convergence of Keff, eps, is greater than 1e-5 (the
+ prescribed convergence criteria)
- Assertion #2: ensure that eps is (prevIterKeff - currIterKeff)
"""
prevIterKeff = 0.9
@@ -552,32 +552,46 @@ def test_getMaxBurnSteps(self):
class TestInterfaceAndEventHeaders(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.o, cls.r = test_reactors.loadTestReactor(
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml",
+ customSettings={CONF_TIGHT_COUPLING: True},
+ )
+ cls.r.p.cycle = 0
+ cls.r.p.timeNode = 1
+ cls.r.p.time = 11.01
+ cls.r.core.p.coupledIteration = 7
+
def test_expandCycleAndTimeNodeArgs_Empty(self):
- """When *args are empty, cycleNodeInfo should be an empty string."""
+ """When cycleNodeInfo should be an empty string."""
for task in ["Init", "BOL", "EOL"]:
self.assertEqual(
- Operator._expandCycleAndTimeNodeArgs(interactionName=task), ""
+ self.o._expandCycleAndTimeNodeArgs(interactionName=task), ""
)
- def test_expandCycleAndTimeNodeArgs_OneArg(self):
- """When *args is a single value, cycleNodeInfo should return the right string."""
- cycle = 0
+ def test_expandCycleAndTimeNodeArgs_Cycle(self):
+ """When cycleNodeInfo should return only the cycle."""
for task in ["BOC", "EOC"]:
self.assertEqual(
- Operator._expandCycleAndTimeNodeArgs(cycle, interactionName=task),
- f" - cycle {cycle}",
+ self.o._expandCycleAndTimeNodeArgs(interactionName=task),
+ f" - timestep: cycle {self.r.p.cycle}",
)
+
+ def test_expandCycleAndTimeNodeArgs_EveryNode(self):
+ """When cycleNodeInfo should return the cycle and node."""
self.assertEqual(
- Operator._expandCycleAndTimeNodeArgs(cycle, interactionName="Coupled"),
- f" - iteration {cycle}",
+ self.o._expandCycleAndTimeNodeArgs(interactionName="EveryNode"),
+ f" - timestep: cycle {self.r.p.cycle}, node {self.r.p.timeNode}, "
+ f"year {'{0:.2f}'.format(self.r.p.time)}",
)
- def test_expandCycleAndTimeNodeArgs_TwoArg(self):
- """When *args is two values, cycleNodeInfo should return the right string."""
- cycle, timeNode = 0, 0
+ def test_expandCycleAndTimeNodeArgs_Coupled(self):
+ """When cycleNodeInfo should return the cycle, node, and iteration number."""
self.assertEqual(
- Operator._expandCycleAndTimeNodeArgs(
- cycle, timeNode, interactionName="EveryNode"
+ self.o._expandCycleAndTimeNodeArgs(interactionName="Coupled"),
+ (
+ f" - timestep: cycle {self.r.p.cycle}, node {self.r.p.timeNode}, year "
+ f"{'{0:.2f}'.format(self.r.p.time)} - iteration {self.r.core.p.coupledIteration}"
),
- f" - cycle {cycle}, node {timeNode}",
)
diff --git a/armi/physics/fuelCycle/assemblyRotationAlgorithms.py b/armi/physics/fuelCycle/assemblyRotationAlgorithms.py
index a56d30827..30c87ba2e 100644
--- a/armi/physics/fuelCycle/assemblyRotationAlgorithms.py
+++ b/armi/physics/fuelCycle/assemblyRotationAlgorithms.py
@@ -19,8 +19,10 @@
These algorithms are defined in assemblyRotationAlgorithms.py, but they are used in:
``FuelHandler.outage()``.
-.. warning:: Nothing should do in this file, but rotation algorithms.
+.. warning:: Nothing should go in this file but rotation algorithms.
"""
+import math
+
from armi import runLog
from armi.physics.fuelCycle.hexAssemblyFuelMgmtUtils import (
getOptimalAssemblyOrientation,
@@ -28,8 +30,13 @@
from armi.physics.fuelCycle.settings import CONF_ASSEM_ROTATION_STATIONARY
+def _rotationNumberToRadians(rot: int) -> float:
+ """Convert a rotation number to radians, assuming a HexAssembly."""
+ return rot * math.pi / 3
+
+
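Since a hexagonal assembly has six-fold symmetry, each rotation step is one sixth of a turn; the conversion above therefore maps rotation numbers 0-5 onto 60-degree increments:

    import math

    for rot in range(6):
        radians = rot * math.pi / 3
        print(rot, round(math.degrees(radians)))  # 0, 60, 120, 180, 240, 300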
def buReducingAssemblyRotation(fh):
- r"""
+ """
Rotates all detail assemblies to put the highest bu pin in the lowest power orientation.
Parameters
@@ -48,31 +55,38 @@ def buReducingAssemblyRotation(fh):
aNow = fh.r.core.getAssemblyWithStringLocation(aPrev.lastLocationLabel)
# no point in rotation if there's no pin detail
if aNow in hist.getDetailAssemblies():
- rot = getOptimalAssemblyOrientation(aNow, aPrev)
- aNow.rotatePins(rot) # rot = integer between 0 and 5
+ _rotateByComparingLocations(aNow, aPrev)
numRotated += 1
- # Print out rotation operation (mainly for testing)
- # hex indices (i,j) = (ring,pos)
- (i, j) = aNow.spatialLocator.getRingPos()
- runLog.important(
- "Rotating Assembly ({0},{1}) to Orientation {2}".format(i, j, rot)
- )
- # rotate NON-MOVING assemblies (stationary)
if fh.cs[CONF_ASSEM_ROTATION_STATIONARY]:
for a in hist.getDetailAssemblies():
if a not in fh.moved:
- rot = getOptimalAssemblyOrientation(a, a)
- a.rotatePins(rot) # rot = integer between 0 and 6
+ _rotateByComparingLocations(a, a)
numRotated += 1
- (i, j) = a.spatialLocator.getRingPos()
- runLog.important(
- "Rotating Assembly ({0},{1}) to Orientation {2}".format(i, j, rot)
- )
runLog.info("Rotated {0} assemblies".format(numRotated))
+def _rotateByComparingLocations(aNow, aPrev):
+ """Rotate an assembly based on its previous location.
+
+ Parameters
+ ----------
+ aNow : Assembly
+ Assembly to be rotated
+ aPrev : Assembly
+ Assembly that previously occupied the location of this assembly.
+        If ``aNow`` has not been moved, this should be ``aNow``.
+ """
+ rot = getOptimalAssemblyOrientation(aNow, aPrev)
+ radians = _rotationNumberToRadians(rot)
+ aNow.rotate(radians)
+ (ring, pos) = aNow.spatialLocator.getRingPos()
+ runLog.important(
+ "Rotating Assembly ({0},{1}) to Orientation {2}".format(ring, pos, rot)
+ )
+
+
def simpleAssemblyRotation(fh):
"""
Rotate all pin-detail assemblies that were just shuffled by 60 degrees.
@@ -98,13 +112,14 @@ def simpleAssemblyRotation(fh):
runLog.info("Rotating assemblies by 60 degrees")
numRotated = 0
hist = fh.o.getInterface("history")
+ rot = math.radians(60)
for a in hist.getDetailAssemblies():
if a in fh.moved or fh.cs[CONF_ASSEM_ROTATION_STATIONARY]:
- a.rotatePins(1)
+ a.rotate(rot)
numRotated += 1
- i, j = a.spatialLocator.getRingPos() # hex indices (i,j) = (ring,pos)
+ ring, pos = a.spatialLocator.getRingPos()
runLog.extra(
- "Rotating Assembly ({0},{1}) to Orientation {2}".format(i, j, 1)
+ "Rotating Assembly ({0},{1}) to Orientation {2}".format(ring, pos, 1)
)
runLog.extra("Rotated {0} assemblies".format(numRotated))
diff --git a/armi/physics/fuelCycle/fuelHandlerInterface.py b/armi/physics/fuelCycle/fuelHandlerInterface.py
index 79637c320..511dc1856 100644
--- a/armi/physics/fuelCycle/fuelHandlerInterface.py
+++ b/armi/physics/fuelCycle/fuelHandlerInterface.py
@@ -135,7 +135,6 @@ def manageFuel(self, cycle):
fName="{}.shuffles_{}.png".format(self.cs.caseTitle, self.r.p.cycle),
shuffleArrows=arrows,
)
- plotting.close()
def makeShuffleReport(self):
"""
@@ -143,12 +142,11 @@ def makeShuffleReport(self):
This can be used to export shuffling to an external code or to
perform explicit repeat shuffling in a restart.
- It creates a ``*SHUFFLES.txt`` file based on the Reactor.moveList structure
+        It creates a ``*SHUFFLES.txt`` file based on the Reactor.moves structure.
See Also
--------
readMoves : reads this file and parses it.
-
"""
fname = self.cs.caseTitle + "-SHUFFLES.txt"
out = open(fname, "w")
@@ -157,7 +155,7 @@ def makeShuffleReport(self):
# remember, we put cycle 0 in so we could do BOL branch searches.
# This also syncs cycles up with external physics kernel cycles.
out.write("Before cycle {0}:\n".format(cycle + 1))
- movesThisCycle = self.r.core.moveList.get(cycle)
+ movesThisCycle = self.r.core.moves.get(cycle)
if movesThisCycle is not None:
for (
fromLoc,
diff --git a/armi/physics/fuelCycle/fuelHandlers.py b/armi/physics/fuelCycle/fuelHandlers.py
index e4f8f2986..40d0c15b7 100644
--- a/armi/physics/fuelCycle/fuelHandlers.py
+++ b/armi/physics/fuelCycle/fuelHandlers.py
@@ -29,7 +29,7 @@
import re
import warnings
-import numpy
+import numpy as np
from armi import runLog
from armi.physics.fuelCycle import assemblyRotationAlgorithms as rotAlgos
@@ -54,19 +54,9 @@ class FuelHandler:
"""
def __init__(self, operator):
- # we need access to the operator to find the core, get settings, grab
- # other interfaces, etc.
+ # we need access to the operator to find the core, get settings, grab other interfaces, etc.
self.o = operator
self.moved = []
- self._handleBackwardsCompatibility()
-
- def _handleBackwardsCompatibility(self):
- # prepSearch used to be part of the API but is deprecated. This will
- # trigger a warning if it's implemented.
- # We have to do this hack until we phase out old inputs.
- # This basically asks: "Did the custom subclass override prepSearch?"
- if self.prepSearch.__func__ is not FuelHandler.prepSearch:
- self.prepSearch()
@property
def cycle(self):
@@ -207,30 +197,28 @@ def prepCore(self):
"""Aux function to run before XS generation (do moderation, etc)."""
pass
- def prepSearch(self, *args, **kwargs):
+ @staticmethod
+ def _compareAssem(candidate, current):
+ """Check whether the candidate assembly should replace the current ideal assembly.
+
+ Given a candidate tuple (diff1, a1) and current tuple (diff2, a2), decide whether the
+ candidate is better than the current ideal. This first compares the diff1 and diff2 values.
+ If diff1 is sufficiently less than diff2, a1 wins, returning True. Otherwise, False. If
+ diff1 and diff2 are sufficiently close, the assembly with the lesser assemNum wins. This
+        should result in a more stable comparison than relying on floating-point comparisons alone.
"""
- Optional method that can be implemented in preparation of shuffling.
+ if np.isclose(candidate[0], current[0], rtol=1e-8, atol=1e-8):
+ return candidate[1].p.assemNum < current[1].p.assemNum
+ else:
+ return candidate[0] < current[0]
- Often used to prepare the scope of a shuffling branch search.
+ @staticmethod
+ def _getParamMax(a, paramName, blockLevelMax=True):
+ """Get parameter with Block-level maximum."""
+ if blockLevelMax:
+ return a.getChildParamValues(paramName).max()
- Notes
- -----
- This was used historically to keep a long-lived fuel handler in sync
- with the reactor and can now technically be removed from the API, but
- many historical fuel management inputs still expect it to be called
- by the framework, so here it remains. New developments should
- avoid using it. Most code using it has been refactored to just use
- a ``_prepSearch`` private method.
-
- It now should not be used and will trigger a DeprecationWarning
- in the constructor. It's still here because old user-input code
- calls the parent's prepSearch, which is this.
- """
- warnings.warn(
- "`FuelHandler.prepSearch` is being deprecated from the framework. Please "
- "change your fuel management input to call this method directly.",
- DeprecationWarning,
- )
+ return a.p[paramName]
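The tie-breaking rule in ``_compareAssem`` can be exercised standalone: when two candidate differences are numerically indistinguishable, the lower assembly number wins, keeping shuffle results reproducible run-to-run. A sketch using plain numbers in place of assembly objects:

    import numpy as np

    def compare(candidate, current):
        """candidate/current are (diff, assemNum) pairs in this sketch."""
        if np.isclose(candidate[0], current[0], rtol=1e-8, atol=1e-8):
            return candidate[1] < current[1]
        return candidate[0] < current[0]

    print(compare((1.0, 12), (1.0 + 1e-12, 7)))  # False: near-tie, 12 is not < 7
    print(compare((0.5, 12), (1.0, 7)))          # True: clearly smaller diff wins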
def findAssembly(
self,
@@ -295,24 +283,20 @@ def findAssembly(
maxParam : float or list, optional
a parameter to compare to maxVal for setting upper bounds of acceptable assemblies.
- If list,
- must correspond to parameters in maxVal in order.
+ If list, must correspond to parameters in maxVal in order.
minVal : float or list, optional
a value or a (parameter, multiplier) tuple for setting lower bounds
- For instance, if minParam = 'timeToLimit' and minVal=10, only assemblies with
- timeToLimit higher than 10 will be returned. (Of course, there is also maxParam and
- maxVal)
+ For instance, if minParam='timeToLimit' and minVal=10, only assemblies with timeToLimit
+ higher than 10 will be returned. (Of course, there is also maxParam and maxVal)
maxVal : float or list, optional
a value or a (parameter, multiplier) tuple for setting upper bounds
mandatoryLocations : list, optional
- a list of string-representations of locations in the core for limiting the search to
- several places
-
- Any locations also included in `excludedLocations` will be excluded.
+ A list of string-representations of locations in the core for limiting the search to
+ several places. Any locations also included in `excludedLocations` will be excluded.
excludedLocations : list, optional
a list of string-representations of locations in the core that will be excluded from
@@ -349,20 +333,19 @@ def findAssembly(
default: false.
findFromSfp : bool, optional
- if true, will look in the spent-fuel pool instead of in the core.
+ If true, will look in the spent-fuel pool instead of in the core.
maxNumAssems : int, optional
The maximum number of assemblies to return. Only relevant if findMany==True
circularRingFlag : bool, optional
- A flag to toggle on using rings that are based on distance from the center of the
- reactor
+ Toggle using rings that are based on distance from the center of the reactor
Notes
-----
- The call signature on this method may have gotten slightly out of hand as
- valuable capabilities were added in fuel management studies. For additional expansion,
- it may be worth reconsidering the design of these query operations ;).
+ The call signature on this method may have gotten slightly out of hand as valuable
+ capabilities were added in fuel management studies. For additional expansion, it may be
+ worth reconsidering the design of these query operations.
Returns
-------
@@ -381,29 +364,8 @@ def findAssembly(
typeSpec=Flags.FEED | Flags.FUEL)
"""
-
- def compareAssem(candidate, current):
- """Check whether the candidate assembly should replace the current ideal
- assembly.
-
- Given a candidate tuple (diff1, a1) and current tuple (diff2, a2), decide
- whether the candidate is better than the current ideal. This first compares
- the diff1 and diff2 values. If diff1 is sufficiently less than diff2, a1
- wins, returning True. Otherwise, False. If diff1 and diff2 are sufficiently
- close, the assembly with the lesser assemNum wins. This should result in a
- more stable comparison than on floating-point comparisons alone.
- """
- if numpy.isclose(candidate[0], current[0], rtol=1e-8, atol=1e-8):
- return candidate[1].p.assemNum < current[1].p.assemNum
- else:
- return candidate[0] < current[0]
-
- def getParamWithBlockLevelMax(a, paramName):
- if blockLevelMax:
- return a.getChildParamValues(paramName).max()
- return a.p[paramName]
-
- assemList = [] # list for storing multiple results if findMany is true.
+ # list for storing multiple results if findMany is true.
+ assemList = []
# process input arguments
if targetRing is None:
@@ -452,7 +414,7 @@ def getParamWithBlockLevelMax(a, paramName):
compVal = compareTo * mult
elif param:
# assume compareTo is an assembly
- compVal = getParamWithBlockLevelMax(compareTo, param) * mult
+ compVal = FuelHandler._getParamMax(compareTo, param, blockLevelMax) * mult
if coords:
# find the assembly closest to xt,yt if coords are given without considering params.
@@ -501,12 +463,16 @@ def getParamWithBlockLevelMax(a, paramName):
if isinstance(minVal, tuple):
# tuple turned in. it's a multiplier and a param
realMinVal = (
- getParamWithBlockLevelMax(a, minVal[0]) * minVal[1]
+ FuelHandler._getParamMax(a, minVal[0], blockLevelMax)
+ * minVal[1]
)
else:
realMinVal = minVal
- if getParamWithBlockLevelMax(a, minParam) < realMinVal:
+ if (
+ FuelHandler._getParamMax(a, minParam, blockLevelMax)
+ < realMinVal
+ ):
# this assembly does not meet the minVal specifications. Skip it.
innocent = False
break # for speed (not a big deal here)
@@ -521,12 +487,16 @@ def getParamWithBlockLevelMax(a, paramName):
if isinstance(maxVal, tuple):
# tuple turned in. it's a multiplier and a param
realMaxVal = (
- getParamWithBlockLevelMax(a, maxVal[0]) * maxVal[1]
+ FuelHandler._getParamMax(a, maxVal[0], blockLevelMax)
+ * maxVal[1]
)
else:
realMaxVal = maxVal
- if getParamWithBlockLevelMax(a, maxParam) > realMaxVal:
+ if (
+ FuelHandler._getParamMax(a, maxParam, blockLevelMax)
+ > realMaxVal
+ ):
# this assembly has a maxParam that's higher than maxVal and therefore
# doesn't qualify. skip it.
innocent = False
@@ -551,23 +521,25 @@ def getParamWithBlockLevelMax(a, paramName):
# Now find the assembly with the param closest to the target val.
if param:
- diff = abs(getParamWithBlockLevelMax(a, param) - compVal)
+ diff = abs(
+ FuelHandler._getParamMax(a, param, blockLevelMax) - compVal
+ )
if (
forceSide == 1
- and getParamWithBlockLevelMax(a, param) > compVal
- and compareAssem((diff, a), minDiff)
+ and FuelHandler._getParamMax(a, param, blockLevelMax) > compVal
+ and FuelHandler._compareAssem((diff, a), minDiff)
):
# forceSide=1, so that means look in rings further out
minDiff = (diff, a)
elif (
forceSide == -1
- and getParamWithBlockLevelMax(a, param) < compVal
- and compareAssem((diff, a), minDiff)
+ and FuelHandler._getParamMax(a, param, blockLevelMax) < compVal
+ and FuelHandler._compareAssem((diff, a), minDiff)
):
# forceSide=-1, so that means look in rings closer in from the targetRing
minDiff = (diff, a)
- elif compareAssem((diff, a), minDiff):
+ elif FuelHandler._compareAssem((diff, a), minDiff):
# no preference of which side, just take the one with the closest param.
minDiff = (diff, a)
else:
@@ -852,7 +824,7 @@ def dischargeSwap(self, incoming, outgoing):
have the same number and same height of stationary blocks. If not, return an error.
If all checks pass, the :py:meth:`~armi.reactor.assemblies.Assembly.remove` and
- :py:meth:`~armi.reactor.assemblies.Assembly.insert`` methods are used to swap the
+ :py:meth:`~armi.reactor.assemblies.Assembly.insert` methods are used to swap the
stationary blocks between the two assemblies.
Once this process is complete, the actual assembly movement can take place. Through this
@@ -913,6 +885,11 @@ def swapCascade(self, assemList):
"""
Perform swaps on a list of assemblies.
+ Parameters
+ ----------
+        assemList : list
+ A list of assemblies to be shuffled.
+
Notes
-----
[goingOut,inter1,inter2,goingIn] will go to
@@ -927,16 +904,15 @@ def swapCascade(self, assemList):
# first check for duplicates
for assem in assemList:
if assemList.count(assem) != 1:
- runLog.extra("Warning: %s is in the cascade more than once!" % assem)
+ runLog.warning(f"{assem} is in the cascade more than once.")
- # now swap.
+ # now swap
levels = len(assemList)
for level in range(levels - 1):
if not assemList[level + 1]:
- # If None in the cascade, just skip it. this will lead to slightly unintended shuffling if
- # the user wasn't careful enough. Their problem.
- runLog.extra(
- "Skipping level %d in the cascade because it is none" % (level + 1)
+ runLog.info(
+ f"Skipping level {level + 1} in the cascade because it is None. Be careful, "
+ "this might cause an unexpected shuffling order."
)
continue
self.swapAssemblies(assemList[0], assemList[level + 1])
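The cascade described in the Notes reduces to repeatedly swapping the first element with each later one; a list-based sketch shows the resulting ring shift:

    cascade = ["goingOut", "inter1", "inter2", "goingIn"]
    for level in range(len(cascade) - 1):
        # mirrors swapAssemblies(assemList[0], assemList[level + 1])
        cascade[0], cascade[level + 1] = cascade[level + 1], cascade[0]
    print(cascade)  # ['goingIn', 'goingOut', 'inter1', 'inter2']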
@@ -1427,7 +1403,7 @@ def makeShuffleArrows(self):
currentCoords = a.spatialLocator.getGlobalCoordinates()
oldCoords = self.oldLocations.get(a.getName(), None)
if oldCoords is None:
- oldCoords = numpy.array((-50, -50, 0))
+ oldCoords = np.array((-50, -50, 0))
elif any(currentCoords != oldCoords):
arrows.append((oldCoords, currentCoords))
diff --git a/armi/physics/fuelCycle/hexAssemblyFuelMgmtUtils.py b/armi/physics/fuelCycle/hexAssemblyFuelMgmtUtils.py
index c29b09357..e1c73608e 100644
--- a/armi/physics/fuelCycle/hexAssemblyFuelMgmtUtils.py
+++ b/armi/physics/fuelCycle/hexAssemblyFuelMgmtUtils.py
@@ -21,9 +21,10 @@
"""
import math
-import numpy
+import numpy as np
from armi import runLog
+from armi.utils.hexagon import getIndexOfRotatedCell
from armi.reactor.flags import Flags
from armi.utils.mathematics import findClosest
@@ -101,10 +102,7 @@ def getOptimalAssemblyOrientation(a, aPrev):
prevAssemPowHereMIN = float("inf")
for possibleRotation in range(6):
- # get rotated pin index
- indexLookup = maxBuBlock.rotatePins(possibleRotation, justCompute=True)
- # rotated index of highest-BU pin
- index = int(indexLookup[maxBuPinIndexAssem])
+ index = getIndexOfRotatedCell(maxBuPinIndexAssem, possibleRotation)
# get pin power at this index in the previously assembly located here
# power previously at rotated index
prevAssemPowHere = aPrev[bIndexMaxBu].p.linPowByPin[index - 1]
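The search above evaluates all six hex rotations and keeps the one that places the highest-burnup pin at the position where the previous occupant saw the least power. A toy version of that minimization, with made-up pin powers (one hypothetical value per rotation):

    prevPowerAtMaxBuPin = [4.1, 3.2, 5.0, 2.8, 3.9, 4.4]  # hypothetical values
    bestRot, minPow = 0, float("inf")
    for rot, power in enumerate(prevPowerAtMaxBuPin):
        if power < minPow:
            bestRot, minPow = rot, power
    print(bestRot)  # 3: the rotation with the lowest prior power (2.8)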
@@ -251,9 +249,7 @@ def buildRingSchedule(
# don't let it be smaller than 2 because linspace(1,5,1)= [1], linspace(1,5,2)= [1,5]
numSteps = max(numSteps, 2)
- baseRings = [
- int(ring) for ring in numpy.linspace(dischargeRing, chargeRing, numSteps)
- ]
+ baseRings = [int(ring) for ring in np.linspace(dischargeRing, chargeRing, numSteps)]
# eliminate duplicates.
newBaseRings = []
for br in baseRings:
@@ -331,7 +327,7 @@ def buildConvergentRingSchedule(chargeRing, dischargeRing=1, coarseFactor=0.0):
# don't let it be smaller than 2 because linspace(1,5,1)= [1], linspace(1,5,2)= [1,5]
numSteps = max(numSteps, 2)
convergent = [
- int(ring) for ring in numpy.linspace(dischargeRing, chargeRing, numSteps)
+ int(ring) for ring in np.linspace(dischargeRing, chargeRing, numSteps)
]
# step 2. eliminate duplicates
@@ -392,7 +388,7 @@ def _buildEqRingScheduleHelper(ringSchedule, numRings):
toRing = ringSchedule[i + 1]
numRings = abs(toRing - fromRing) + 1
- ringList.extend([int(j) for j in numpy.linspace(fromRing, toRing, numRings)])
+ ringList.extend([int(j) for j in np.linspace(fromRing, toRing, numRings)])
# eliminate doubles (but allow a ring to show up multiple times)
newList = []
@@ -429,8 +425,8 @@ def _squaredDistanceFromOrigin(a):
-------
float: Distance from reactor center
"""
- origin = numpy.array([0.0, 0.0, 0.0])
- p = numpy.array(a.spatialLocator.getLocalCoordinates())
+ origin = np.array([0.0, 0.0, 0.0])
+ p = np.array(a.spatialLocator.getLocalCoordinates())
return ((p - origin) ** 2).sum()
diff --git a/armi/physics/fuelCycle/settings.py b/armi/physics/fuelCycle/settings.py
index 045f08ce4..647a279ae 100644
--- a/armi/physics/fuelCycle/settings.py
+++ b/armi/physics/fuelCycle/settings.py
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Settings for generic fuel cycle code."""
-from armi.operators import settingsValidation
from armi.settings import setting
+from armi.settings import settingsValidation
CONF_ASSEM_ROTATION_STATIONARY = "assemblyRotationStationary"
CONF_ASSEMBLY_ROTATION_ALG = "assemblyRotationAlgorithm"
@@ -101,13 +101,19 @@ def getFuelCycleSettings():
label="Plot shuffle arrows",
),
setting.Setting(
- CONF_JUMP_RING_NUM, default=8, label="Jump Ring Number", description="None"
+ CONF_JUMP_RING_NUM,
+ default=8,
+ label="Jump Ring Number",
+ description="The number of hex rings jumped when distributing the feed assemblies in "
+ "the alternating concentric rings or checkerboard shuffle patterns (convergent / "
+ "divergent shuffling).",
),
setting.Setting(
CONF_LEVELS_PER_CASCADE,
default=14,
label="Move per cascade",
- description="None",
+ description="The number of moves made per cascade when performing convergent or "
+ "divergent shuffle patterns.",
),
]
return settings
diff --git a/armi/physics/fuelCycle/tests/test_fuelHandlers.py b/armi/physics/fuelCycle/tests/test_fuelHandlers.py
index c905d28d7..dd3cd843c 100644
--- a/armi/physics/fuelCycle/tests/test_fuelHandlers.py
+++ b/armi/physics/fuelCycle/tests/test_fuelHandlers.py
@@ -110,7 +110,7 @@ def setUp(self):
# generate an assembly
self.assembly = assemblies.HexAssembly("TestAssemblyType")
- self.assembly.spatialGrid = grids.axialUnitGrid(1)
+ self.assembly.spatialGrid = grids.AxialGrid.fromNCells(1)
for _ in range(1):
self.assembly.add(copy.deepcopy(self.block))
@@ -154,6 +154,15 @@ def interactBOC(self, cycle=None):
class TestFuelHandler(FuelHandlerTestHelper):
+ def test_getParamMax(self):
+ a = self.assembly
+
+ res = fuelHandlers.FuelHandler._getParamMax(a, "kInf", True)
+ self.assertEqual(res, 0.0)
+
+ res = fuelHandlers.FuelHandler._getParamMax(a, "kInf", False)
+ self.assertEqual(res, 0.0)
+
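The two assertions cover both lookup modes of ``_getParamMax``: a block-level maximum versus the assembly-level parameter. A toy object (hypothetical values, not the real Assembly API) makes the difference concrete:

    import numpy as np

    class ToyAssembly:
        p = {"kInf": 1.02}  # assembly-level value

        def getChildParamValues(self, name):
            return np.array([1.00, 1.05, 0.98])  # per-block values

    a = ToyAssembly()
    print(a.getChildParamValues("kInf").max())  # 1.05 -> blockLevelMax=True path
    print(a.p["kInf"])                          # 1.02 -> blockLevelMax=False path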
def test_interactBOC(self):
# set up mock interface
self.o.addInterface(MockLatticePhysicsInterface(self.r, self.o.cs))
@@ -212,6 +221,18 @@ def test_outage(self, mockChooseSwaps):
fh.outage(factor=1.0)
self.assertEqual(len(fh.moved), 0)
+ def test_outageEdgeCase(self):
+ class MockFH(fuelHandlers.FuelHandler):
+ def chooseSwaps(self, factor=1.0):
+ self.moved = [None]
+
+ # mock up a fuel handler
+ fh = MockFH(self.o)
+
+ # test edge case
+ with self.assertRaises(AttributeError):
+ fh.outage(factor=1.0)
+
def test_isAssemblyInAZone(self):
# build a fuel handler
fh = fuelHandlers.FuelHandler(self.o)
@@ -404,8 +425,7 @@ def test_findWithMinMax(self):
expected = lastB.parent
self.assertIs(assem, expected)
- # test the impossible: an block with burnup less than
- # 110% of its own burnup
+        # test the impossible: a block with burnup less than 110% of its own burnup
assem = fh.findAssembly(
param="percentBu",
compareTo=100,
@@ -436,14 +456,8 @@ def runShuffling(self, fh):
fh.interactEOL()
def test_repeatShuffles(self):
- """Loads the ARMI test reactor with a custom shuffle logic file and shuffles assemblies twice.
-
- Notes
- -----
- The custom shuffle logic is executed by :py:meth:`armi.physics.fuelCycle.fuelHandlerInterface.FuelHandlerInterface.manageFuel`
- within :py:meth:`armi.physics.fuelCycle.tests.test_fuelHandlers.TestFuelHandler.runShuffling`. There are
- two primary assertions: spent fuel pool assemblies are in the correct location and the assemblies were shuffled
- into their correct locations. This process is repeated twice to ensure repeatability.
+ """Loads the ARMI test reactor with a custom shuffle logic file and shuffles assemblies
+ twice.
.. test:: Execute user-defined shuffle operations based on a reactor model.
:id: T_ARMI_SHUFFLE
@@ -452,6 +466,15 @@ def test_repeatShuffles(self):
.. test:: Move an assembly from one position in the core to another.
:id: T_ARMI_SHUFFLE_MOVE0
:tests: R_ARMI_SHUFFLE_MOVE
+
+ Notes
+ -----
+ The custom shuffle logic is executed by
+ :py:meth:`armi.physics.fuelCycle.fuelHandlerInterface.FuelHandlerInterface.manageFuel` in
+ :py:meth:`armi.physics.fuelCycle.tests.test_fuelHandlers.TestFuelHandler.runShuffling`.
+ There are two primary assertions: spent fuel pool assemblies are in the correct location and
+ the assemblies were shuffled into their correct locations. This process is repeated twice to
+ ensure repeatability.
"""
# check labels before shuffling:
for a in self.r.sfp.getChildren():
@@ -461,9 +484,9 @@ def test_repeatShuffles(self):
fh = self.r.o.getInterface("fuelHandler")
self.runShuffling(fh) # changes caseTitle
- # make sure the generated shuffles file matches the tracked one.
- # This will need to be updated if/when more assemblies are added to the test reactor
- # but must be done carefully. Do not blindly rebaseline this file.
+ # Make sure the generated shuffles file matches the tracked one. This will need to be
+ # updated if/when more assemblies are added to the test reactor but must be done carefully.
+ # Do not blindly rebaseline this file.
self.compareFilesLineByLine("armiRun-SHUFFLES.txt", "armiRun2-SHUFFLES.txt")
# store locations of each assembly
@@ -644,8 +667,7 @@ def test_transferStationaryBlocks(self):
def test_transferDifferentNumberStationaryBlocks(self):
"""
- Test the _transferStationaryBlocks method
- for the case where the input assemblies have
+ Test the _transferStationaryBlocks method for the case where the input assemblies have
different numbers of stationary blocks.
"""
# grab stationary block flags
@@ -674,8 +696,7 @@ def test_transferDifferentNumberStationaryBlocks(self):
def test_transferUnalignedLocationStationaryBlocks(self):
"""
- Test the _transferStationaryBlocks method
- for the case where the input assemblies have
+ Test the _transferStationaryBlocks method for the case where the input assemblies have
unaligned locations of stationary blocks.
"""
# grab stationary block flags
@@ -707,7 +728,7 @@ def test_transferUnalignedLocationStationaryBlocks(self):
for sbf in sBFList
)
)
- except: # noqa: bare-except
+ except Exception:
a1[block.spatialLocator.k - 1].setType(
a1[block.spatialLocator.k - 1].p.type, sBFList[0]
)
@@ -809,8 +830,7 @@ def test_dischargeSwap(self):
def test_dischargeSwapIncompatibleStationaryBlocks(self):
"""
- Test the _transferStationaryBlocks method
- for the case where the input assemblies have
+ Test the _transferStationaryBlocks method for the case where the input assemblies have
different numbers as well as unaligned locations of stationary blocks.
"""
# grab stationary block flags
@@ -858,7 +878,7 @@ def test_dischargeSwapIncompatibleStationaryBlocks(self):
for sbf in sBFList
)
)
- except: # noqa: bare-except
+ except Exception:
a1[block.spatialLocator.k - 1].setType(
a1[block.spatialLocator.k - 1].p.type, sBFList[0]
)
@@ -874,6 +894,25 @@ def test_dischargeSwapIncompatibleStationaryBlocks(self):
with self.assertRaises(ValueError):
fh.dischargeSwap(a2, a1)
+ def test_getAssembliesInRings(self):
+ fh = fuelHandlers.FuelHandler(self.o)
+ aList0 = fh._getAssembliesInRings([0], Flags.FUEL, False, None, False)
+ self.assertEqual(len(aList0), 1)
+
+ aList1 = fh._getAssembliesInRings([0, 1, 2], Flags.FUEL, False, None, False)
+ self.assertEqual(len(aList1), 3)
+
+ aList2 = fh._getAssembliesInRings([0, 1, 2], Flags.FUEL, True, None, False)
+ self.assertEqual(len(aList2), 3)
+
+ aList3 = fh._getAssembliesInRings(
+ [0, 1, 2, "SFP"], Flags.FUEL, True, None, False
+ )
+ self.assertEqual(len(aList3), 4)
+
+ aList4 = fh._getAssembliesInRings([0, 1, 2], Flags.FUEL, False, None, True)
+ self.assertEqual(len(aList4), 3)
+
class TestFuelPlugin(unittest.TestCase):
"""Tests that make sure the plugin is being discovered well."""
diff --git a/armi/physics/fuelPerformance/__init__.py b/armi/physics/fuelPerformance/__init__.py
index ed101d935..bcc4d2039 100644
--- a/armi/physics/fuelPerformance/__init__.py
+++ b/armi/physics/fuelPerformance/__init__.py
@@ -14,31 +14,26 @@
"""
Generic fuel performance plugin package.
-Fuel performance deals with addressing fuel system limits and predicting
-behaviors that are coupled to other physics within the reactor. Often
-fuel performance models address chemical, thermal and mechanical behaviors
-of the fuel system.
+Fuel performance deals with addressing fuel system limits and predicting behaviors that are coupled
+to other physics within the reactor. Often fuel performance models address chemical, thermal and
+mechanical behaviors of the fuel system.
-The following general phenomena fall into the fuel performance category
-of physics for solid fuel (e.g., SFR, LWR, TRISO):
+The following general phenomena fall into the fuel performance category of physics for solid fuel
+(e.g., SFR, LWR, TRISO):
-* chemical degradation on the inside of fuel cladding such as
- fuel-clad chemical interaction (FCCI)
+* chemical degradation on the inside of fuel cladding such as fuel-clad chemical interaction (FCCI)
* corrosion or erosion processes on the outside of the fuel cladding
-* the fuel-clad mechanical interaction (FCMI) resulting in cladding stress
- and strain
+* the fuel-clad mechanical interaction (FCMI) resulting in cladding stress and strain
* pressurization of the fuel pin due to released fission gases
-* high temperatures of the fuel which affect material properties and feedback
- during accident scenarios
+* high temperatures of the fuel which affect material properties and feedback during accident
+ scenarios
-Fuel performance is typically coupled with thermal analysis because the thermal
-conditions of the fuel affects the performance and properties of the fuel
-change with temperature and burnup.
+Fuel performance is typically coupled with thermal analysis because the thermal conditions of the
+fuel affect its performance, and the properties of the fuel change with temperature and burnup.
-In many cases, fuel performance is coupled with neutronic analysis as well,
-because the fission gases are strong neutron absorbers. In some reactors,
-significant composition changes during irradiation can influence neutronics
-as well (e.g. sodium thermal bond being squeezed out of pins). Finally,
-fuel temperatures impact the Doppler reactivity coefficient.
+In many cases, fuel performance is coupled with neutronic analysis as well, because the fission
+gases are strong neutron absorbers. In some reactors, significant composition changes during
+irradiation can influence neutronics as well (e.g. sodium thermal bond being squeezed out of pins).
+Finally, fuel temperatures impact the Doppler reactivity coefficient.
"""
-from .plugin import FuelPerformancePlugin # noqa: unused-import
+from armi.physics.fuelPerformance.plugin import FuelPerformancePlugin # noqa: F401
diff --git a/armi/physics/fuelPerformance/settings.py b/armi/physics/fuelPerformance/settings.py
index 7a8e56cd0..edef934f2 100644
--- a/armi/physics/fuelPerformance/settings.py
+++ b/armi/physics/fuelPerformance/settings.py
@@ -14,8 +14,8 @@
"""Settings related to fuel performance."""
-from armi.operators.settingsValidation import Query
from armi.settings import setting
+from armi.settings.settingsValidation import Query
CONF_AXIAL_EXPANSION = "axialExpansion"
@@ -44,7 +44,10 @@ def defineSettings():
CONF_FGYF,
default=0.25,
label="Fission Gas Yield Fraction",
- description="The fraction of gaseous atoms produced per fission event, assuming a fission product yield of 2.0",
+ description=(
+ "The fraction of gaseous atoms produced per fission event, assuming a "
+ "fission product yield of 2.0"
+ ),
),
setting.Setting(
CONF_AXIAL_EXPANSION,
diff --git a/armi/physics/neutronics/__init__.py b/armi/physics/neutronics/__init__.py
index 463166c1a..a14f9ab8e 100644
--- a/armi/physics/neutronics/__init__.py
+++ b/armi/physics/neutronics/__init__.py
@@ -33,12 +33,11 @@
# ruff: noqa: F401, E402
from enum import IntEnum
-import numpy
-import tabulate
+import numpy as np
-from armi import plugins
-from armi import runLog
+from armi import plugins, runLog
from armi.physics.neutronics.const import CONF_CROSS_SECTION
+from armi.utils import tabulate
class NeutronicsPlugin(plugins.ArmiPlugin):
@@ -79,8 +78,8 @@ def defineEntryPoints():
@plugins.HOOKIMPL
def defineSettings():
"""Define settings for the plugin."""
- from armi.physics.neutronics import settings as neutronicsSettings
from armi.physics.neutronics import crossSectionSettings
+ from armi.physics.neutronics import settings as neutronicsSettings
from armi.physics.neutronics.fissionProductModel import (
fissionProductModelSettings,
)
@@ -99,10 +98,10 @@ def defineSettings():
@plugins.HOOKIMPL
def defineSettingsValidators(inspector):
"""Implementation of settings inspections for neutronics settings."""
- from armi.physics.neutronics.settings import getNeutronicsSettingValidators
from armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import (
getFissionProductModelSettingValidators,
)
+ from armi.physics.neutronics.settings import getNeutronicsSettingValidators
settingsValidators = getNeutronicsSettingValidators(inspector)
settingsValidators.extend(getFissionProductModelSettingValidators(inspector))
@@ -133,7 +132,6 @@ def getReportContents(r, cs, report, stage, blueprint):
RESTARTFILES,
)
-
# ARC and CCCC cross section file format names
COMPXS = "COMPXS"
PMATRX = "PMATRX"
@@ -248,29 +246,31 @@ def applyEffectiveDelayedNeutronFractionToCore(core, cs):
)
core.p.beta = sum(beta)
- core.p.betaComponents = numpy.array(beta)
- core.p.betaDecayConstants = numpy.array(decayConstants)
+ core.p.betaComponents = np.array(beta)
+ core.p.betaDecayConstants = np.array(decayConstants)
reportTableData.append(("Total Delayed Neutron Fraction", core.p.beta))
- reportTableData.append(
- ("Group-wise Delayed Neutron Fractions", core.p.betaComponents)
- )
- reportTableData.append(
- ("Group-wise Precursor Decay Constants", core.p.betaDecayConstants)
- )
+ for i, betaComponent in enumerate(core.p.betaComponents):
+ reportTableData.append(
+ (f"Group {i} Delayed Neutron Fractions", betaComponent)
+ )
+ for i, decayConstant in enumerate(core.p.betaDecayConstants):
+ reportTableData.append(
+ ("Group {i} Precursor Decay Constants", decayConstant)
+ )
# Report to the user the values were not applied.
if not reportTableData and (beta is not None or decayConstants is not None):
runLog.warning(
f"Delayed neutron fraction(s) - {beta} and decay constants"
- " - {decayConstants} have not been applied."
+ f" - {decayConstants} have not been applied."
)
else:
runLog.extra(
tabulate.tabulate(
- tabular_data=reportTableData,
+ data=reportTableData,
headers=["Component", "Value"],
- tablefmt="armi",
+ tableFmt="armi",
)
)
diff --git a/armi/physics/neutronics/crossSectionGroupManager.py b/armi/physics/neutronics/crossSectionGroupManager.py
index f93d24cd7..0131d625a 100644
--- a/armi/physics/neutronics/crossSectionGroupManager.py
+++ b/armi/physics/neutronics/crossSectionGroupManager.py
@@ -56,16 +56,15 @@
import shutil
import string
-import numpy
+import numpy as np
-from armi import context
-from armi import interfaces
-from armi import runLog
+from armi import context, interfaces, runLog
+from armi.physics.neutronics import LatticePhysicsFrequency
from armi.physics.neutronics.const import CONF_CROSS_SECTION
+from armi.reactor import flags
from armi.reactor.components import basicShapes
from armi.reactor.flags import Flags
from armi.utils.units import TRACE_NUMBER_DENSITY
-from armi.physics.neutronics import LatticePhysicsFrequency
ORDER = interfaces.STACK_ORDER.BEFORE + interfaces.STACK_ORDER.CROSS_SECTIONS
@@ -320,13 +319,13 @@ class AverageBlockCollection(BlockCollection):
:id: I_ARMI_XSGM_CREATE_REPR_BLOCKS0
:implements: R_ARMI_XSGM_CREATE_REPR_BLOCKS
- This class constructs new blocks from an existing block list based on a
- volume-weighted average. Inheriting functionality from the abstract
- :py:class:`Reactor ` object, this class
- will construct representative blocks using averaged parameters of all blocks in the given collection.
- Number density averages can be computed at a component level
- or at a block level by default. Average nuclide temperatures and burnup are also included when constructing a representative block.
-
+ This class constructs new blocks from an existing block list based on a volume-weighted
+ average. Inheriting functionality from the abstract
+ :py:class:`Reactor `
+ object, this class will construct representative blocks using averaged parameters of all
+ blocks in the given collection. Number density averages can be computed at a component level
+ or at a block level by default. Average nuclide temperatures and burnup are also included
+ when constructing a representative block.
"""
def _makeRepresentativeBlock(self):
@@ -360,7 +359,7 @@ def _getAverageNumberDensities(self):
"""
nuclides = self.allNuclidesInProblem
blocks = self.getCandidateBlocks()
- weights = numpy.array([self.getWeight(b) for b in blocks])
+ weights = np.array([self.getWeight(b) for b in blocks])
weights /= weights.sum() # normalize by total weight
ndens = weights.dot([b.getNuclideNumberDensities(nuclides) for b in blocks])
return dict(zip(nuclides, ndens))
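
The weighted dot product above is the entire averaging scheme; a self-contained sketch with made-up numbers (two blocks, two nuclides):

```python
import numpy as np

# hypothetical data: two candidate blocks, two nuclides
nuclides = ["U235", "U238"]
blockWeights = np.array([2.0, 1.0])      # e.g., volume-based weights
blockDensities = [[7.0e-4, 7.0e-3],      # block 1 number densities (atoms/bn-cm)
                  [6.0e-4, 8.0e-3]]      # block 2

weights = blockWeights / blockWeights.sum()  # normalize by total weight
ndens = weights.dot(blockDensities)          # weighted average per nuclide
avg = dict(zip(nuclides, ndens))             # {"U235": 6.67e-4, "U238": 7.33e-3}
```

The same pattern repeats below for component-level densities and for the nvt/nv temperature terms.
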
@@ -373,8 +372,8 @@ def _getAverageFuelLFP(self):
def _getNucTempHelper(self):
"""All candidate blocks are used in the average."""
- nvt = numpy.zeros(len(self.allNuclidesInProblem))
- nv = numpy.zeros(len(self.allNuclidesInProblem))
+ nvt = np.zeros(len(self.allNuclidesInProblem))
+ nv = np.zeros(len(self.allNuclidesInProblem))
for block in self.getCandidateBlocks():
wt = self.getWeight(block)
nvtBlock, nvBlock = getBlockNuclideTemperatureAvgTerms(
@@ -395,7 +394,7 @@ def _getAverageComponentNumberDensities(self, compIndex):
"""
nuclides = self.allNuclidesInProblem
blocks = self.getCandidateBlocks()
- weights = numpy.array([self.getWeight(b) for b in blocks])
+ weights = np.array([self.getWeight(b) for b in blocks])
weights /= weights.sum() # normalize by total weight
components = [sorted(b.getComponents())[compIndex] for b in blocks]
ndens = weights.dot([c.getNuclideNumberDensities(nuclides) for c in components])
@@ -407,11 +406,11 @@ def _getAverageComponentTemperature(self, compIndex):
Notes
-----
- Weighting is both by the block weight within the collection and the relative mass of the component.
- The block weight is already scaled by the block volume, so we need to pull that out of the block
- weighting because it would effectively be double-counted in the component mass. b.getHeight()
- is proportional to block volume, so it is used here as a computationally cheaper proxy for scaling
- by block volume.
+ Weighting is both by the block weight within the collection and the relative mass of the
+ Component. The block weight is already scaled by the block volume, so we need to pull that
+ out of the block weighting because it would effectively be double-counted in the component
+ mass. b.getHeight() is proportional to block volume, so it is used here as a computationally
+ cheaper proxy for scaling by block volume.
Returns
-------
@@ -419,7 +418,7 @@ def _getAverageComponentTemperature(self, compIndex):
nucName, ndens data (atoms/bn-cm)
"""
blocks = self.getCandidateBlocks()
- weights = numpy.array([self.getWeight(b) / b.getHeight() for b in blocks])
+ weights = np.array([self.getWeight(b) / b.getHeight() for b in blocks])
weights /= weights.sum() # normalize by total weight
components = [sorted(b.getComponents())[compIndex] for b in blocks]
weightedAvgComponentMass = sum(
@@ -427,11 +426,11 @@ def _getAverageComponentTemperature(self, compIndex):
)
if weightedAvgComponentMass == 0.0:
# if there is no component mass (e.g., gap), do a regular average
- return numpy.mean(numpy.array([c.temperatureInC for c in components]))
+ return np.mean(np.array([c.temperatureInC for c in components]))
else:
return (
weights.dot(
- numpy.array([c.temperatureInC * c.getMass() for c in components])
+ np.array([c.temperatureInC * c.getMass() for c in components])
)
/ weightedAvgComponentMass
)
@@ -440,9 +439,8 @@ def _performAverageByComponent(self):
"""
Check if block collection averaging can/should be performed by component.
- If the components of blocks in the collection are similar and the user
- has requested component-level averaging, return True.
- Otherwise, return False.
+ If the components of blocks in the collection are similar and the user has requested
+ Component-level averaging, return True. Otherwise, return False.
"""
if not self.averageByComponent:
return False
@@ -453,9 +451,8 @@ def _checkBlockSimilarity(self):
"""
Check if blocks in the collection have similar components.
- If the components of blocks in the collection are similar and the user
- has requested component-level averaging, return True.
- Otherwise, return False.
+ If the components of blocks in the collection are similar and the user has requested
+ Component-level averaging, return True. Otherwise, return False.
"""
cFlags = dict()
for b in self.getCandidateBlocks():
@@ -481,8 +478,8 @@ def getBlockNuclideTemperatureAvgTerms(block, allNucNames):
This volume-weights the densities by component volume fraction.
- It's important to count zero-density nuclides (i.e. ones like AM242 that are expected to build up)
- as trace values at the proper component temperatures.
+ It's important to count zero-density nuclides (i.e. ones like AM242 that are expected to build
+ up) as trace values at the proper component temperatures.
"""
def getNumberDensitiesWithTrace(component, allNucNames):
@@ -497,14 +494,12 @@ def getNumberDensitiesWithTrace(component, allNucNames):
vol = block.getVolume()
components, volFracs = zip(*block.getVolumeFractions())
# D = CxN matrix of number densities
- ndens = numpy.array(
- [getNumberDensitiesWithTrace(c, allNucNames) for c in components]
- )
- temperatures = numpy.array(
+ ndens = np.array([getNumberDensitiesWithTrace(c, allNucNames) for c in components])
+ temperatures = np.array(
[c.temperatureInC for c in components]
) # C-length temperature array
nvBlock = (
- ndens.T * numpy.array(volFracs) * vol
+ ndens.T * np.array(volFracs) * vol
) # multiply each component's values by volume frac, now NxC
nvt = sum((nvBlock * temperatures).T) # N-length array summing over components.
nv = sum(nvBlock.T) # N-length array
@@ -513,29 +508,30 @@ def getNumberDensitiesWithTrace(component, allNucNames):
class CylindricalComponentsAverageBlockCollection(BlockCollection):
"""
- Creates a representative block for the purpose of cross section generation with a one-dimensional
- cylindrical model.
+    Creates a representative block for the purpose of cross section generation with a
+    one-dimensional cylindrical model.
.. impl:: Create representative blocks using custom cylindrical averaging.
:id: I_ARMI_XSGM_CREATE_REPR_BLOCKS1
:implements: R_ARMI_XSGM_CREATE_REPR_BLOCKS
- This class constructs representative blocks based on a volume-weighted average
- using cylindrical blocks from an existing block list. Inheriting functionality from the abstract
- :py:class:`Reactor ` object, this class
- will construct representative blocks using averaged parameters of all blocks in the given collection.
- Number density averages are computed at a component level. Nuclide temperatures from a median block-average temperature
- are used and the average burnup is evaluated across all blocks in the block list.
+ This class constructs representative blocks based on a volume-weighted average using
+ cylindrical blocks from an existing block list. Inheriting functionality from the abstract
+ :py:class:`Reactor `
+ object, this class will construct representative blocks using averaged parameters of all
+ blocks in the given collection. Number density averages are computed at a component level.
+ Nuclide temperatures from a median block-average temperature are used and the average burnup
+ is evaluated across all blocks in the block list.
Notes
-----
- When generating the representative block within this collection, the geometry is checked
- against all other blocks to ensure that the number of components are consistent. This implementation
- is intended to be opinionated, so if a user attempts to put blocks that have geometric differences
+ When generating the representative block within this collection, the geometry is checked against
+ all other blocks to ensure that the number of components are consistent. This implementation is
+ intended to be opinionated, so if a user attempts to put blocks that have geometric differences
then this will fail.
- This selects a representative block based on the collection of candidates based on the
- median block average temperatures as an assumption.
+    This selects a representative block from the collection of candidates based on the median
+    Block-average temperatures as an assumption.
"""
def _getNewBlock(self):
@@ -583,17 +579,18 @@ def _checkComponentConsistency(b, repBlock):
Raises
------
ValueError
- When the components in a candidate block do not align with
- the components in the representative block. This check includes component area, component multiplicity,
- and nuclide composition.
+ When the components in a candidate block do not align with the components in the
+ representative Block. This check includes component area, component multiplicity, and
+ nuclide composition.
"""
if len(b) != len(repBlock):
raise ValueError(
- f"Blocks {b} and {repBlock} have differing number "
- "of components and cannot be homogenized"
+ f"Blocks {b} and {repBlock} have differing number of components and cannot be "
+ "homogenized"
)
- # Using Fe-56 as a proxy for structure and Na-23 as proxy for coolant is undesirably SFR-centric
- # This should be generalized in the future, if possible
+
+ # TODO: Using Fe-56 as a proxy for structure and Na-23 as proxy for coolant is undesirably
+ # SFR-centric. This should be generalized in the future, if possible.
consistentNucs = {"PU239", "U238", "U235", "U234", "FE56", "NA23", "O16"}
for c, repC in zip(sorted(b), sorted(repBlock)):
compString = (
@@ -620,16 +617,18 @@ def _checkComponentConsistency(b, repBlock):
def _getAverageComponentNucs(self, components, bWeights):
"""Compute average nuclide densities by block weights and component area fractions."""
allNucNames = self._getAllNucs(components)
- densities = numpy.zeros(len(allNucNames))
+ densities = np.zeros(len(allNucNames))
totalWeight = 0.0
for c, bWeight in zip(components, bWeights):
weight = bWeight * c.getArea()
totalWeight += weight
- densities += weight * numpy.array(c.getNuclideNumberDensities(allNucNames))
+ densities += weight * np.array(c.getNuclideNumberDensities(allNucNames))
return allNucNames, densities / totalWeight
def _orderComponentsInGroup(self, repBlock):
- """Order the components based on dimension and material type within the representative block."""
+ """Order the components based on dimension and material type within the representative
+ Block.
+ """
for b in self.getCandidateBlocks():
self._checkComponentConsistency(b, repBlock)
componentLists = [list(sorted(b)) for b in self.getCandidateBlocks()]
@@ -637,8 +636,8 @@ def _orderComponentsInGroup(self, repBlock):
def _getNucTempHelper(self):
"""All candidate blocks are used in the average."""
- nvt = numpy.zeros(len(self.allNuclidesInProblem))
- nv = numpy.zeros(len(self.allNuclidesInProblem))
+ nvt = np.zeros(len(self.allNuclidesInProblem))
+ nv = np.zeros(len(self.allNuclidesInProblem))
for block in self.getCandidateBlocks():
wt = self.getWeight(block)
nvtBlock, nvBlock = getBlockNuclideTemperatureAvgTerms(
@@ -655,13 +654,13 @@ class SlabComponentsAverageBlockCollection(BlockCollection):
Notes
-----
- - Ignores lumped fission products since there is no foreseeable need for burn calculations in 1D slab geometry
- since it is used for low power neutronic validation.
- - Checks for consistent component dimensions for all blocks in a group and then creates a new block.
- - Iterates through components of all blocks and calculates component average number densities. This calculation
- takes the first component of each block, averages the number densities, and applies this to the number density
- to the representative block.
-
+ - Ignores lumped fission products since there is no foreseeable need for burn calculations in 1D
+ slab geometry since it is used for low power neutronic validation.
+ - Checks for consistent component dimensions for all blocks in a group and then creates a new
+ Block.
+ - Iterates through components of all blocks and calculates component average number densities.
+ This calculation takes the first component of each block, averages the number densities, and
+ applies this to the number density to the representative block.
"""
def _getNewBlock(self):
@@ -704,14 +703,16 @@ def _checkComponentConsistency(b, repBlock, components=None):
Raises
------
ValueError
- When the components in a candidate block do not align with
- the components in the representative block. This check includes component area, component multiplicity,
- and nuclide composition.
+ When the components in a candidate block do not align with the components in the
+ representative block. This check includes component area, component multiplicity, and
+ nuclide composition.
TypeError
When the shape of the component is not a rectangle.
- .. warning:: This only checks ``consistentNucs`` for ones that are important in ZPPR and BFS.
+ Warning
+ -------
+ This only checks ``consistentNucs`` for ones that are important in ZPPR and BFS.
"""
comps = b if components is None else components
@@ -770,7 +771,8 @@ def _removeLatticeComponents(repBlock):
Notes
-----
- - This component does not serve any purpose for XS generation as it contains void material with zero area.
+ - This component does not serve any purpose for XS generation as it contains void material
+ with zero area.
- Removing this component does not modify the blocks within the reactor.
"""
for c in repBlock.iterComponents():
@@ -781,12 +783,12 @@ def _removeLatticeComponents(repBlock):
def _getAverageComponentNucs(self, components, bWeights):
"""Compute average nuclide densities by block weights and component area fractions."""
allNucNames = self._getAllNucs(components)
- densities = numpy.zeros(len(allNucNames))
+ densities = np.zeros(len(allNucNames))
totalWeight = 0.0
for c, bWeight in zip(components, bWeights):
weight = bWeight * c.getArea()
totalWeight += weight
- densities += weight * numpy.array(c.getNuclideNumberDensities(allNucNames))
+ densities += weight * np.array(c.getNuclideNumberDensities(allNucNames))
return allNucNames, densities / totalWeight
def _orderComponentsInGroup(self, repBlock):
@@ -869,11 +871,11 @@ def interactBOL(self):
reactor state.
"""
# now that all cs settings are loaded, apply defaults to compound XS settings
- from armi.physics.neutronics.settings import CONF_XS_BLOCK_REPRESENTATION
from armi.physics.neutronics.settings import (
CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION,
+ CONF_LATTICE_PHYSICS_FREQUENCY,
+ CONF_XS_BLOCK_REPRESENTATION,
)
- from armi.physics.neutronics.settings import CONF_LATTICE_PHYSICS_FREQUENCY
self.cs[CONF_CROSS_SECTION].setDefaults(
self.cs[CONF_XS_BLOCK_REPRESENTATION],
@@ -960,7 +962,7 @@ def interactCoupled(self, iteration):
See Also
--------
- :py:meth:`Assembly `
+ :py:meth:`~armi.physics.neutronics.latticePhysics.latticePhysics.LatticePhysicsInterface.interactCoupled`
"""
if (
iteration == 0
@@ -1228,8 +1230,19 @@ def createRepresentativeBlocksUsingExistingBlocks(
for newXSID in modifiedReprBlocks:
oldXSID = origXSIDsFromNew[newXSID]
oldBlockCollection = blockCollectionByXsGroup[oldXSID]
+
+ # create a new block collection that inherits all of the properties
+ # and settings from oldBlockCollection.
+ validBlockTypes = oldBlockCollection._validRepresentativeBlockTypes
+ if validBlockTypes is not None and len(validBlockTypes) > 0:
+ validBlockTypes = [
+ flags._toString(Flags, flag)
+ for flag in oldBlockCollection._validRepresentativeBlockTypes
+ ]
newBlockCollection = oldBlockCollection.__class__(
- oldBlockCollection.allNuclidesInProblem
+ oldBlockCollection.allNuclidesInProblem,
+ validBlockTypes=validBlockTypes,
+ averageByComponent=oldBlockCollection.averageByComponent,
)
newBlockCollectionsByXsGroup[newXSID] = newBlockCollection
return newBlockCollectionsByXsGroup, modifiedReprBlocks, origXSIDsFromNew
@@ -1288,6 +1301,12 @@ def _getModifiedReprBlocks(self, blockList, originalRepresentativeBlocks):
if b.getMicroSuffix() == origXSID:
b.p.xsType = newXSType
+ # copy XS settings to new XS ID
+ self.cs[CONF_CROSS_SECTION][newXSID] = copy.deepcopy(
+ self.cs[CONF_CROSS_SECTION][origXSID]
+ )
+ self.cs[CONF_CROSS_SECTION][newXSID].xsID = newXSID
+
return modifiedReprBlocks, origXSIDsFromNew
def getNextAvailableXsTypes(self, howMany=1, excludedXSTypes=None):
diff --git a/armi/physics/neutronics/crossSectionSettings.py b/armi/physics/neutronics/crossSectionSettings.py
index 439cf7abf..22a58cc14 100644
--- a/armi/physics/neutronics/crossSectionSettings.py
+++ b/armi/physics/neutronics/crossSectionSettings.py
@@ -600,7 +600,6 @@ def setDefaults(self, blockRepresentation, validBlockTypes):
----------
blockRepresentation : str
Valid options are provided in ``CrossSectionGroupManager.BLOCK_COLLECTIONS``
-
validBlockTypes : list of str or bool
This configures which blocks (by their type) that the cross section
group manager will merge together to create a representative block. If
@@ -611,11 +610,11 @@ def setDefaults(self, blockRepresentation, validBlockTypes):
Notes
-----
- These defaults are application-specific and design specific. They are included
- to provide an example and are tuned to fit the internal needs of TerraPower. Consider
- a separate implementation/subclass if you would like different behavior.
+ These defaults are application-specific and design specific. They are included to provide an
+ example and are tuned to fit the internal needs of TerraPower. Consider a separate
+ implementation/subclass if you would like different behavior.
"""
- if type(validBlockTypes) == bool:
+ if type(validBlockTypes) is bool:
validBlockTypes = None if validBlockTypes else ["fuel"]
else:
validBlockTypes = validBlockTypes
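
A minimal sketch of the bool-to-list normalization this hunk tightens, with the behavior read directly from the code above:

```python
def normalizeValidBlockTypes(validBlockTypes):
    """True -> no filtering (None); False -> restrict to fuel; a list passes through."""
    if type(validBlockTypes) is bool:
        return None if validBlockTypes else ["fuel"]
    return validBlockTypes

assert normalizeValidBlockTypes(True) is None
assert normalizeValidBlockTypes(False) == ["fuel"]
assert normalizeValidBlockTypes(["fuel", "control"]) == ["fuel", "control"]
```
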
diff --git a/armi/physics/neutronics/energyGroups.py b/armi/physics/neutronics/energyGroups.py
index 9cee0d887..5da90636d 100644
--- a/armi/physics/neutronics/energyGroups.py
+++ b/armi/physics/neutronics/energyGroups.py
@@ -17,7 +17,7 @@
import itertools
import math
-import numpy
+import numpy as np
from armi import runLog
from armi.utils.mathematics import findNearestValue
@@ -111,12 +111,12 @@ def getGroupStructure(name):
def getGroupStructureType(neutronEnergyBoundsInEv):
"""Return neutron energy group structure name for a given set of neutron energy group bounds in eV."""
- neutronEnergyBoundsInEv = numpy.array(neutronEnergyBoundsInEv)
+ neutronEnergyBoundsInEv = np.array(neutronEnergyBoundsInEv)
for groupStructureType in GROUP_STRUCTURE:
- refNeutronEnergyBoundsInEv = numpy.array(getGroupStructure(groupStructureType))
+ refNeutronEnergyBoundsInEv = np.array(getGroupStructure(groupStructureType))
if len(refNeutronEnergyBoundsInEv) != len(neutronEnergyBoundsInEv):
continue
- if numpy.allclose(refNeutronEnergyBoundsInEv, neutronEnergyBoundsInEv, 1e-5):
+ if np.allclose(refNeutronEnergyBoundsInEv, neutronEnergyBoundsInEv, 1e-5):
return groupStructureType
raise ValueError(
"Neutron energy group structure type does not exist for the given neutron energy bounds: {}".format(
diff --git a/armi/physics/neutronics/fissionProductModel/fissionProductModelSettings.py b/armi/physics/neutronics/fissionProductModel/fissionProductModelSettings.py
index f42669afc..d23e1b142 100644
--- a/armi/physics/neutronics/fissionProductModel/fissionProductModelSettings.py
+++ b/armi/physics/neutronics/fissionProductModel/fissionProductModelSettings.py
@@ -31,13 +31,16 @@ def defineSettings():
default="infinitelyDilute",
label="Fission Product Model",
description=(
- "This setting is used to determine how fission products are treated in an analysis. "
- "By choosing `noFissionProducts`, no fission products will be added. By selecting, `infinitelyDilute`, "
- "lumped fission products will be initialized to a very small number on the blocks/components that require them. "
- "By choosing `MO99`, the fission products will be represented only by Mo-99. This is a simplistic assumption that "
- "is commonly used by fast reactor analyses in scoping calculations and is not necessarily a great assumption for "
- "depletion evaluations. Finally, by choosing `explicitFissionProducts` the fission products will be added explicitly "
- "to the blocks/components that are depletable. This is useful for detailed tracking of fission products."
+ "This setting is used to determine how fission products are treated in an "
+ "analysis. By choosing `noFissionProducts`, no fission products will be added. By "
+ "selecting, `infinitelyDilute`, lumped fission products will be initialized to a "
+ "very small number on the blocks/components that require them. By choosing `MO99`, "
+ "the fission products will be represented only by Mo-99. This is a simplistic "
+ "assumption that is commonly used by fast reactor analyses in scoping calculations "
+ "and is not necessarily a great assumption for depletion evaluations. Finally, by "
+ "choosing `explicitFissionProducts` the fission products will be added explicitly "
+ "to the blocks/components that are depletable. This is useful for detailed tracking "
+ "of fission products."
),
options=[
"noFissionProducts",
@@ -51,16 +54,12 @@ def defineSettings():
default="",
label="Fission Product Library",
description=(
- f"This setting can used when the `{CONF_FP_MODEL}` setting "
- "is set to `explicitFissionProducts` and is used to configure "
- "all the nuclides that should be modeled within the core. "
- "Setting this is equivalent to adding all nuclides in the "
- "selected code library (i.e., MC2-3) within the blueprints "
- "`nuclideFlags` to be [xs:true, burn:false]. This option acts "
- "as a short-cut so that analysts do not need to change their "
- "inputs when modifying the fission product treatment for "
- "calculations. This may be extended for other cross section "
- "generation codes."
+ f"This setting should be used when `{CONF_FP_MODEL}` is set to "
+ "`explicitFissionProducts`. It is used in conjunction with any nuclideFlags "
+ "defined in the blueprints to configure all the nuclides that are modeled within "
+ "the core. Selecting any library option will add all nuclides from the selected "
+ "library to the model so that analysts do not need to change their inputs when "
+ "modifying the fission product treatment for calculations."
),
options=[
"",
@@ -72,9 +71,9 @@ def defineSettings():
default=False,
label="Use Independent LFPs",
description=(
- "Flag to make all blocks have independent lumped fission products. Note that this is forced to be True "
- "when the ``explicitFissionProducts`` modeling option is selected or an interface named `mcnp` is "
- "on registered on the operator stack."
+ "Flag to make all blocks have independent lumped fission products. Note that this "
+ "is forced to be True when the `explicitFissionProducts` modeling option is "
+ "selected or an interface named `mcnp` is on registered on the operator stack."
),
),
setting.Setting(
@@ -82,9 +81,9 @@ def defineSettings():
default=fissionProductModel.REFERENCE_LUMPED_FISSION_PRODUCT_FILE,
label="LFP Definition File",
description=(
- "Path to the file that contains lumped fission product composition "
- "definitions (e.g. equilibrium yields). This is unused when the "
- "`explicitFissionProducts` or `MO99` modeling options are selected."
+ "Path to the file that contains lumped fission product composition definitions "
+ "(e.g. equilibrium yields). This is unused when the `explicitFissionProducts` or "
+ "`MO99` modeling options are selected."
),
),
]
@@ -94,7 +93,7 @@ def defineSettings():
def getFissionProductModelSettingValidators(inspector):
"""The standard helper method, to provide validators to the fission product model."""
# Import the Query class here to avoid circular imports.
- from armi.operators.settingsValidation import Query
+ from armi.settings.settingsValidation import Query
queries = []
@@ -103,8 +102,8 @@ def getFissionProductModelSettingValidators(inspector):
lambda: inspector.cs[CONF_FP_MODEL] != "explicitFissionProducts"
and not bool(inspector.cs["initializeBurnChain"]),
(
- "The burn chain is not being initialized and the fission product model is not set to `explicitFissionProducts`. "
- "This will likely fail."
+ "The burn chain is not being initialized and the fission product model is not set "
+ "to `explicitFissionProducts`. This will likely fail."
),
f"Would you like to set the `{CONF_FP_MODEL}` to `explicitFissionProducts`?",
lambda: inspector._assignCS(CONF_FP_MODEL, "explicitFissionProducts"),
@@ -116,8 +115,9 @@ def getFissionProductModelSettingValidators(inspector):
lambda: inspector.cs[CONF_FP_MODEL] != "explicitFissionProducts"
and inspector.cs[CONF_FISSION_PRODUCT_LIBRARY_NAME] != "",
(
- "The explicit fission product model is disabled and the fission product model library is set. This will have no "
- f"impact on the results, but it is best to disable the `{CONF_FISSION_PRODUCT_LIBRARY_NAME}` option."
+ "The explicit fission product model is disabled and the fission product model "
+ "library is set. This will have no impact on the results, but it is best to "
+ f"disable the `{CONF_FISSION_PRODUCT_LIBRARY_NAME}` option."
),
"Would you like to do this?",
lambda: inspector._assignCS(CONF_FISSION_PRODUCT_LIBRARY_NAME, ""),
@@ -129,8 +129,8 @@ def getFissionProductModelSettingValidators(inspector):
lambda: inspector.cs[CONF_FP_MODEL] == "explicitFissionProducts"
and bool(inspector.cs["initializeBurnChain"]),
(
- "The explicit fission product model is enabled, but initializing the burn chain is also enabled. This will "
- "likely fail."
+ "The explicit fission product model is enabled, but initializing the burn chain is "
+ "also enabled. This will likely fail."
),
"Would you like to disable the burn chain initialization?",
lambda: inspector._assignCS("initializeBurnChain", False),
@@ -142,10 +142,14 @@ def getFissionProductModelSettingValidators(inspector):
lambda: inspector.cs[CONF_FP_MODEL] == "explicitFissionProducts"
and inspector.cs[CONF_FISSION_PRODUCT_LIBRARY_NAME] == "",
(
- "The explicit fission product model is enabled and the fission product model library is disabled. May result in "
- "no fission product nuclides being added to the case, unless these have manually added in `nuclideFlags`."
+ "The explicit fission product model is enabled and the fission product model "
+ "library is disabled. May result in no fission product nuclides being added to the "
+ "case, unless these have manually added in `nuclideFlags`."
+ ),
+ (
+ f"Would you like to set the `{CONF_FISSION_PRODUCT_LIBRARY_NAME}` option to be "
+ "equal to the default implementation of MC2-3?."
),
- f"Would you like to set the `{CONF_FISSION_PRODUCT_LIBRARY_NAME}` option to be equal to the default implementation of MC2-3?.",
lambda: inspector._assignCS(CONF_FISSION_PRODUCT_LIBRARY_NAME, "MC2-3"),
)
)
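
Each validator above follows the same four-part `Query` shape: condition, warning, prompt, correction. A hedged sketch of adding one more, assuming the surrounding `getFissionProductModelSettingValidators` scope (`inspector`, `queries`) and the `Query` signature used in this file; the validator itself is hypothetical:

```python
# hypothetical extra validator following the same four-part pattern
queries.append(
    Query(
        lambda: inspector.cs[CONF_FP_MODEL] == "MO99",
        "The MO99 fission product model is a scoping-level simplification.",
        "Would you like to switch to `explicitFissionProducts`?",
        lambda: inspector._assignCS(CONF_FP_MODEL, "explicitFissionProducts"),
    )
)
```
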
diff --git a/armi/physics/neutronics/fissionProductModel/lumpedFissionProduct.py b/armi/physics/neutronics/fissionProductModel/lumpedFissionProduct.py
index 0a3f849ad..4a5c1ce75 100644
--- a/armi/physics/neutronics/fissionProductModel/lumpedFissionProduct.py
+++ b/armi/physics/neutronics/fissionProductModel/lumpedFissionProduct.py
@@ -180,23 +180,6 @@ def getMassFracDenom(self):
massFracDenom += self[nuc] * nuc.weight
return massFracDenom
- def printDensities(self, lfpDens):
- """
- Print number densities of nuclides within the lumped fission product.
-
- Parameters
- ----------
- lfpDens : float
- Number density (atom/b-cm) of the lumped fission product
-
- Notes
- -----
- This multiplies the provided number density for the lumped fission
- product by the yield of each nuclide.
- """
- for n in sorted(self.keys()):
- runLog.info("{0:6s} {1:.7E}".format(n.name, lfpDens * self[n]))
-
class LumpedFissionProductCollection(dict):
"""
diff --git a/armi/physics/neutronics/fissionProductModel/tests/test_fissionProductModel.py b/armi/physics/neutronics/fissionProductModel/tests/test_fissionProductModel.py
index 569168e7d..a21b5abd9 100644
--- a/armi/physics/neutronics/fissionProductModel/tests/test_fissionProductModel.py
+++ b/armi/physics/neutronics/fissionProductModel/tests/test_fissionProductModel.py
@@ -67,7 +67,7 @@ def test_getAllFissionProductNames(self):
self.assertIn("XE135", fissionProductNames)
def test_fpApplication(self):
- o, r = loadTestReactor()
+ o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
fpModel = fissionProductModel.FissionProductModel(o.r, o.cs)
# Set up the global LFPs and check that they are setup.
self.assertTrue(fpModel._useGlobalLFPs)
@@ -91,14 +91,19 @@ def test_fpApplication(self):
class TestFissionProductModelExplicitMC2Library(unittest.TestCase):
- """Tests the fission product model interface behavior when explicit fission products are enabled."""
+ """
+ Tests the fission product model interface behavior when explicit fission products are enabled.
+
+ These tests can use a smaller test reactor, and so will be faster.
+ """
def setUp(self):
o, r = loadTestReactor(
customSettings={
CONF_FP_MODEL: "explicitFissionProducts",
CONF_FISSION_PRODUCT_LIBRARY_NAME: "MC2-3",
- }
+ },
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml",
)
self.r = r
self.fpModel = fissionProductModel.FissionProductModel(o.r, o.cs)
@@ -125,6 +130,26 @@ def test_nuclidesInModelFuel(self):
for nb in nuclideBases.byMcc3Id.values():
self.assertIn(nb.name, nuclideList)
+
+class TestFissionProductModelExplicitMC2LibrarySlower(unittest.TestCase):
+ """
+ Tests the fission product model interface behavior when explicit fission products are enabled.
+
+ These tests require a large test reactor, and will lead to slower tests.
+ """
+
+ def setUp(self):
+ o, r = loadTestReactor(
+ customSettings={
+ CONF_FP_MODEL: "explicitFissionProducts",
+ CONF_FISSION_PRODUCT_LIBRARY_NAME: "MC2-3",
+ }
+ )
+ self.r = r
+ self.fpModel = fissionProductModel.FissionProductModel(o.r, o.cs)
+ # Set up the global LFPs and check that they are setup.
+ self.assertFalse(self.fpModel._useGlobalLFPs)
+
def test_nuclidesInModelAllDepletableBlocks(self):
"""Test that the depletable blocks contain all the MC2-3 modeled nuclides.
diff --git a/armi/physics/neutronics/fissionProductModel/tests/test_lumpedFissionProduct.py b/armi/physics/neutronics/fissionProductModel/tests/test_lumpedFissionProduct.py
index 4c8651099..9d5f7f796 100644
--- a/armi/physics/neutronics/fissionProductModel/tests/test_lumpedFissionProduct.py
+++ b/armi/physics/neutronics/fissionProductModel/tests/test_lumpedFissionProduct.py
@@ -118,11 +118,6 @@ def test_gaseousYieldFraction(self):
# data for these tests.
self.assertEqual(lfp.getGaseousYieldFraction(), 8.9000e-05)
- def test_printDensities(self):
- _ = nuclideBases.fromName("XE135")
- lfp = self.fpd.createSingleLFPFromFile("LFP38")
- lfp.printDensities(10.0)
-
def test_isGas(self):
"""Tests that a nuclide is a gas or not at STP based on its chemical phase."""
nb = nuclideBases.byName["H1"]
diff --git a/armi/physics/neutronics/globalFlux/globalFluxInterface.py b/armi/physics/neutronics/globalFlux/globalFluxInterface.py
index 1b202f91c..11a4053fc 100644
--- a/armi/physics/neutronics/globalFlux/globalFluxInterface.py
+++ b/armi/physics/neutronics/globalFlux/globalFluxInterface.py
@@ -12,11 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""The Global flux interface provide a base class for all neutronics tools that compute the neutron and/or photon flux."""
+"""The Global flux interface provide a base class for all neutronics tools that compute the neutron
+and/or photon flux.
+"""
import math
from typing import Dict, Optional
-import numpy
+import numpy as np
import scipy.integrate
from armi import interfaces
@@ -31,7 +33,7 @@
from armi.reactor.converters import uniformMesh
from armi.reactor.flags import Flags
from armi.settings.caseSettings import Settings
-from armi.utils import units, codeTiming, getMaxBurnSteps
+from armi.utils import units, codeTiming, getMaxBurnSteps, getBurnSteps
ORDER = interfaces.STACK_ORDER.FLUX
@@ -83,7 +85,7 @@ def getHistoryParams():
def interactBOC(self, cycle=None):
interfaces.Interface.interactBOC(self, cycle)
- self.r.core.p.rxSwing = 0.0 # zero out rxSwing until EOC.
+ self.r.core.p.rxSwing = 0.0 # zero out rxSwing until last time node.
self.r.core.p.maxDetailedDpaThisCycle = 0.0 # zero out cumulative params
self.r.core.p.dpaFullWidthHalfMax = 0.0
self.r.core.p.elevationOfACLP3Cycles = 0.0
@@ -100,23 +102,30 @@ def interactEveryNode(self, cycle, node):
is up to date with the reactor state.
"""
interfaces.Interface.interactEveryNode(self, cycle, node)
-
- if self.r.p.timeNode == 0:
- self._bocKeff = self.r.core.p.keff # track boc keff for rxSwing param.
+ self._setRxSwingRelatedParams()
def interactCoupled(self, iteration):
"""Runs during a tightly-coupled physics iteration to updated the flux and power."""
interfaces.Interface.interactCoupled(self, iteration)
+ self._setRxSwingRelatedParams()
+
+ def _setRxSwingRelatedParams(self):
+ """Set Params Related to Rx Swing."""
if self.r.p.timeNode == 0:
- self._bocKeff = self.r.core.p.keff # track boc keff for rxSwing param.
-
- def interactEOC(self, cycle=None):
- interfaces.Interface.interactEOC(self, cycle)
- if self._bocKeff is not None:
- self.r.core.p.rxSwing = (
- (self.r.core.p.keff - self._bocKeff)
- / self._bocKeff
- * units.ABS_REACTIVITY_TO_PCM
+ # track boc uncontrolled keff for rxSwing param.
+ self._bocKeff = self.r.core.p.keffUnc or self.r.core.p.keff
+
+        # A 1-burnstep cycle has 2 nodes, and the last node is node index 1 (the first is zero).
+        lastNodeInCycle = getBurnSteps(self.cs)[self.r.p.cycle]
+        if self.r.p.timeNode == lastNodeInCycle and self._bocKeff is not None:
+ eocKeff = self.r.core.p.keffUnc or self.r.core.p.keff
+ swing = (eocKeff - self._bocKeff) / (eocKeff * self._bocKeff)
+ self.r.core.p.rxSwing = swing * units.ABS_REACTIVITY_TO_PCM
+ runLog.info(
+ f"BOC Uncontrolled keff: {self._bocKeff}, "
+ f"EOC Uncontrolled keff: {self.r.core.p.keffUnc}, "
+ f"Cycle Reactivity Swing: {self.r.core.p.rxSwing} pcm"
)
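
The new swing is the change in static reactivity, rho = (k - 1)/k, between BOC and the last node: rho_eoc - rho_boc = 1/k_boc - 1/k_eoc = (k_eoc - k_boc)/(k_eoc * k_boc). A quick numeric check using the same values as the test further down:

```python
ABS_REACTIVITY_TO_PCM = 1.0e5  # matches units.ABS_REACTIVITY_TO_PCM

bocKeff, eocKeff = 1.10, 1.01
rhoBoc = (bocKeff - 1.0) / bocKeff
rhoEoc = (eocKeff - 1.0) / eocKeff
swing = (eocKeff - bocKeff) / (eocKeff * bocKeff)

assert abs(swing - (rhoEoc - rhoBoc)) < 1e-12
print(swing * ABS_REACTIVITY_TO_PCM)  # about -8100.8 pcm, as asserted in the test below
```
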
def checkEnergyBalance(self):
@@ -721,7 +730,7 @@ def _renormalizeNeutronFluxByBlock(self, renormalizationCorePower):
currentCorePower = 0.0
for b in self.r.core.getBlocks():
# The multi-group flux is volume integrated, so J/cm * n-cm/s gives units of Watts
- b.p.power = numpy.dot(
+ b.p.power = np.dot(
b.getTotalEnergyGenerationConstants(), b.getIntegratedMgFlux()
)
b.p.flux = sum(b.getMgFlux())
@@ -992,13 +1001,13 @@ def updateFluenceAndDpa(self, stepTimeInSeconds, blockList=None):
# if it is a non-hex block, this should be a no-op
if b.p.pointsCornerDpaRate is not None:
if b.p.pointsCornerDpa is None:
- b.p.pointsCornerDpa = numpy.zeros((6,))
+ b.p.pointsCornerDpa = np.zeros((6,))
b.p.pointsCornerDpa = (
b.p.pointsCornerDpa + b.p.pointsCornerDpaRate * stepTimeInSeconds
)
if b.p.pointsEdgeDpaRate is not None:
if b.p.pointsEdgeDpa is None:
- b.p.pointsEdgeDpa = numpy.zeros((6,))
+ b.p.pointsEdgeDpa = np.zeros((6,))
b.p.pointsEdgeDpa = (
b.p.pointsEdgeDpa + b.p.pointsEdgeDpaRate * stepTimeInSeconds
)
@@ -1029,7 +1038,8 @@ def updateFluenceAndDpa(self, stepTimeInSeconds, blockList=None):
b.p.percentBuPeak + peakRatePerSecond * stepTimeInSeconds
)
else:
- # No rate, make bad assumption.... assumes peaking is same at each position through shuffling/irradiation history...
+            # No rate; make a bad assumption... assumes peaking is the same at each position through
+ # shuffling/irradiation history...
runLog.warning(
"Scaling burnup by current peaking factor... This assumes peaking "
"factor was constant through shuffling/irradiation history.",
@@ -1045,7 +1055,7 @@ def updateFluenceAndDpa(self, stepTimeInSeconds, blockList=None):
self.updateLoadpadDose()
def updateCycleDoseParams(self):
- r"""Updates reactor params based on the amount of dose (detailedDpa) accrued this cycle.
+ """Updates reactor params based on the amount of dose (detailedDpa) accrued this cycle.
Params updated include:
@@ -1135,7 +1145,6 @@ def updateLoadpadDose(self):
See Also
--------
_calcLoadPadDose : computes the load pad dose
-
"""
peakPeak, peakAvg = self._calcLoadPadDose()
if peakPeak is None:
@@ -1165,8 +1174,7 @@ def _calcLoadPadDose(self):
loadPadLength : float
The axial length of the load pad to average over
- This builds axial splines over the assemblies and then integrates them
- over the load pad.
+ This builds axial splines over the assemblies and then integrates them over the load pad.
The assumptions are that detailedDpa is the average, defined in the center
and detailedDpaPeak is the peak, also defined in the center of blocks.
@@ -1187,7 +1195,6 @@ def _calcLoadPadDose(self):
--------
writeLoadPadDoseSummary : prints out the dose
Assembly.getParamValuesAtZ : gets the parameters at any arbitrary z point
-
"""
loadPadBottom = self.options.loadPadElevation
loadPadLength = self.options.loadPadLength
@@ -1199,7 +1206,7 @@ def _calcLoadPadDose(self):
peakAvg = (0.0, None)
loadPadTop = loadPadBottom + loadPadLength
- zrange = numpy.linspace(loadPadBottom, loadPadTop, 100)
+ zrange = np.linspace(loadPadBottom, loadPadTop, 100)
for a in self.r.core.getAssemblies(Flags.FUEL):
# scan over the load pad to find the peak dpa
# no caching.
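
The loop that follows samples dose over the load pad span and keeps the peak; a minimal sketch of that scan, with a plain hypothetical function standing in for `Assembly.getParamValuesAtZ`:

```python
import numpy as np

loadPadBottom, loadPadLength = 12.0, 24.0
loadPadTop = loadPadBottom + loadPadLength
zrange = np.linspace(loadPadBottom, loadPadTop, 100)

def doseAtZ(z):
    # hypothetical axial dose shape; the real value comes from spline-fit block params
    return 50.0 - 0.02 * (z - 25.0) ** 2

doses = np.array([doseAtZ(z) for z in zrange])
peakDose = doses.max()   # peak dpa anywhere on the load pad
avgDose = doses.mean()   # average over the pad
```
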
@@ -1226,20 +1233,6 @@ def computeDpaRate(mgFlux, dpaXs):
r"""
Compute the DPA rate incurred by exposure of a certain flux spectrum.
- Parameters
- ----------
- mgFlux : list
- multigroup neutron flux in #/cm^2/s
-
- dpaXs : list
- DPA cross section in barns to convolute with flux to determine DPA rate
-
- Returns
- -------
- dpaPerSecond : float
- The dpa/s in this material due to this flux
-
-
.. impl:: Compute DPA rates.
:id: I_ARMI_FLUX_DPA
:implements: R_ARMI_FLUX_DPA
@@ -1272,11 +1265,23 @@ def computeDpaRate(mgFlux, dpaXs):
the number density of the structural material cancels out. It's in the macroscopic
cross-section and in the original number of atoms.
+ Parameters
+ ----------
+ mgFlux : list
+ multigroup neutron flux in #/cm^2/s
+
+ dpaXs : list
+ DPA cross section in barns to convolute with flux to determine DPA rate
+
+ Returns
+ -------
+ dpaPerSecond : float
+ The dpa/s in this material due to this flux
+
Raises
------
RuntimeError
Negative dpa rate.
-
"""
displacements = 0.0
if len(mgFlux) != len(dpaXs):
@@ -1312,21 +1317,6 @@ def calcReactionRates(obj, keff, lib):
r"""
Compute 1-group reaction rates for this object (usually a block).
- Parameters
- ----------
- obj : Block
- The object to compute reaction rates on. Notionally this could be upgraded to be
- any kind of ArmiObject but with params defined as they are it currently is only
- implemented for a block.
-
- keff : float
- The keff of the core. This is required to get the neutron production rate correct
- via the neutron balance statement (since nuSigF has a 1/keff term).
-
- lib : XSLibrary
- Microscopic cross sections to use in computing the reaction rates.
-
-
.. impl:: Return the reaction rates for a given ArmiObject
:id: I_ARMI_FLUX_RX_RATES
:implements: R_ARMI_FLUX_RX_RATES
@@ -1360,6 +1350,20 @@ def calcReactionRates(obj, keff, lib):
\sigma_g = \frac{\int_{E g}^{E_{g+1}} \phi(E) \sigma(E)
dE}{\int_{E_g}^{E_{g+1}} \phi(E) dE}
+
+ Parameters
+ ----------
+ obj : Block
+ The object to compute reaction rates on. Notionally this could be upgraded to be
+ any kind of ArmiObject but with params defined as they are it currently is only
+ implemented for a block.
+
+ keff : float
+ The keff of the core. This is required to get the neutron production rate correct
+ via the neutron balance statement (since nuSigF has a 1/keff term).
+
+ lib : XSLibrary
+ Microscopic cross sections to use in computing the reaction rates.
"""
rate = {}
for simple in RX_PARAM_NAMES:
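
A sketch of the collapse described in the `calcReactionRates` docstring: group cross sections are flux-weighted into one group, and the production term carries the 1/keff factor from the neutron balance. The numbers are hypothetical; the real path reads microscopic data from the ISOTXS library:

```python
import numpy as np

# hypothetical one-group collapse for a single nuclide (3 groups)
mgFlux = np.array([1.0e14, 5.0e14, 2.0e14])  # n/cm^2/s
nuSigF = np.array([2.5, 3.0, 6.0])           # nu * sigma_f, barns
keff = 1.01

sigOneGroup = np.dot(nuSigF, mgFlux) / mgFlux.sum()  # flux-weighted collapse
numberDensity = 7.0e-3                               # atoms/bn-cm

# N (atoms/bn-cm) * sigma (b) = 1/cm; times flux -> reactions/cm^3/s.
# Production carries the 1/keff factor, as noted in the docstring.
productionRate = numberDensity * sigOneGroup * mgFlux.sum() / keff
```
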
diff --git a/armi/physics/neutronics/globalFlux/tests/test_globalFluxInterface.py b/armi/physics/neutronics/globalFlux/tests/test_globalFluxInterface.py
index 5b34fbdd1..671e64773 100644
--- a/armi/physics/neutronics/globalFlux/tests/test_globalFluxInterface.py
+++ b/armi/physics/neutronics/globalFlux/tests/test_globalFluxInterface.py
@@ -12,11 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for generic global flux interface."""
+import logging
import unittest
from unittest.mock import patch
-import numpy
+import numpy as np
+from armi import runLog
from armi import settings
from armi.nuclearDataIO.cccc import isotxs
from armi.physics.neutronics.globalFlux import globalFluxInterface
@@ -30,6 +32,7 @@
from armi.reactor.tests import test_blocks
from armi.reactor.tests import test_reactors
from armi.tests import ISOAA_PATH
+from armi.tests import mockRunLogs
class MockReactorParams:
@@ -168,12 +171,31 @@ def test_interaction(self):
Check that a 1000 pcm rx swing is observed due to the mock.
"""
cs = settings.Settings()
- _o, r = test_reactors.loadTestReactor()
+ cs["burnSteps"] = 2
+ _o, r = test_reactors.loadTestReactor(
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
gfi = MockGlobalFluxInterface(r, cs)
+ bocKeff = 1.1
+ r.core.p.keffUnc = 1.1
gfi.interactBOC()
+
+ r.p.cycle, r.p.timeNode = 0, 0
gfi.interactEveryNode(0, 0)
- gfi.interactEOC()
- self.assertAlmostEqual(r.core.p.rxSwing, 1000)
+ self.assertAlmostEqual(gfi._bocKeff, r.core.p.keffUnc)
+ r.core.p.keffUnc = 1.05
+ r.p.cycle, r.p.timeNode = 0, 1
+ gfi.interactEveryNode(0, 1)
+        # doesn't change since it's not the first node
+ self.assertAlmostEqual(gfi._bocKeff, bocKeff)
+ r.core.p.keffUnc = 1.01
+ r.p.cycle, r.p.timeNode = 0, 2
+ gfi.interactEveryNode(0, 2)
+ self.assertAlmostEqual(gfi._bocKeff, bocKeff)
+ self.assertAlmostEqual(r.core.p.rxSwing, -1e5 * (1.1 - 1.01) / (1.1 * 1.01))
+ gfi.interactBOC(0)
+ # now its zeroed at BOC
+ self.assertAlmostEqual(r.core.p.rxSwing, 0)
def test_getIOFileNames(self):
cs = settings.Settings()
@@ -194,7 +216,9 @@ def test_checkEnergyBalance(self):
:tests: R_ARMI_FLUX_CHECK_POWER
"""
cs = settings.Settings()
- _o, r = test_reactors.loadTestReactor()
+ _o, r = test_reactors.loadTestReactor(
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
gfi = MockGlobalFluxInterface(r, cs)
self.assertEqual(gfi.checkEnergyBalance(), None)
@@ -210,7 +234,9 @@ class TestGlobalFluxInterfaceWithExecuters(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.cs = settings.Settings()
- _o, cls.r = test_reactors.loadTestReactor()
+ cls.r = test_reactors.loadTestReactor(
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )[1]
def setUp(self):
self.r.core.p.keff = 1.0
@@ -295,7 +321,9 @@ class TestGlobalFluxInterfaceWithExecutersNonUniform(unittest.TestCase):
@classmethod
def setUpClass(cls):
cs = settings.Settings()
- _o, cls.r = test_reactors.loadTestReactor()
+ _o, cls.r = test_reactors.loadTestReactor(
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
cls.r.core.p.keff = 1.0
cls.gfi = MockGlobalFluxWithExecutersNonUniform(cls.r, cs)
@@ -306,7 +334,8 @@ def test_executerInteractionNonUniformAssems(self, mockConverterFactory):
This will serve as a broad end-to-end test of the interface, and also
stress test the mesh issues with non-uniform assemblies.
- .. test:: Run the global flux interface to show the geometry converter is called when the nonuniform mesh option is used.
+ .. test:: Run the global flux interface to show the geometry converter is called when the
+ nonuniform mesh option is used.
:id: T_ARMI_FLUX_GEOM_TRANSFORM_CONV
:tests: R_ARMI_FLUX_GEOM_TRANSFORM
"""
@@ -335,9 +364,12 @@ class TestGlobalFluxResultMapper(unittest.TestCase):
"""
def test_mapper(self):
- # Switch to MC2v2 setting to make sure the isotopic/elemental expansions are compatible
- # with actually doing some math using the ISOAA test microscopic library
- o, r = test_reactors.loadTestReactor(customSettings={CONF_XS_KERNEL: "MC2v2"})
+ # Switch to MC2v2 setting to make sure the isotopic/elemental expansions are compatible with
+ # actually doing some math using the ISOAA test microscopic library
+ o, r = test_reactors.loadTestReactor(
+ customSettings={CONF_XS_KERNEL: "MC2v2"},
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml",
+ )
applyDummyFlux(r)
r.core.lib = isotxs.readBinary(ISOAA_PATH)
mapper = globalFluxInterface.GlobalFluxResultMapper(cs=o.cs)
@@ -353,25 +385,93 @@ def test_mapper(self):
block = r.core.getFirstBlock()
self.assertGreater(block.p.detailedDpaRate, 0)
self.assertEqual(block.p.detailedDpa, 0)
- block.p.pointsEdgeDpa = numpy.array([0 for i in range(6)])
- block.p.pointsCornerDpa = numpy.array([0 for i in range(6)])
- block.p.pointsEdgeDpaRate = numpy.array([1.0e-5 for i in range(6)])
- block.p.pointsCornerDpaRate = numpy.array([1.0e-5 for i in range(6)])
+ block.p.pointsEdgeDpa = np.array([0 for i in range(6)])
+ block.p.pointsCornerDpa = np.array([0 for i in range(6)])
+ block.p.pointsEdgeDpaRate = np.array([1.0e-5 for i in range(6)])
+ block.p.pointsCornerDpaRate = np.array([1.0e-5 for i in range(6)])
# Test DoseResultsMapper. Pass in full list of blocks to apply() in order
# to exercise blockList option (does not change behavior, since this is what
# apply() does anyway)
- opts = globalFluxInterface.GlobalFluxOptions("test")
+ opts = globalFluxInterface.GlobalFluxOptions("test_mapper")
opts.fromUserSettings(o.cs)
dosemapper = globalFluxInterface.DoseResultsMapper(1000, opts)
dosemapper.apply(r, blockList=r.core.getBlocks())
self.assertGreater(block.p.detailedDpa, 0)
- self.assertGreater(numpy.min(block.p.pointsCornerDpa), 0)
- self.assertGreater(numpy.min(block.p.pointsEdgeDpa), 0)
+ self.assertGreater(np.min(block.p.pointsCornerDpa), 0)
+ self.assertGreater(np.min(block.p.pointsEdgeDpa), 0)
mapper.clearFlux()
self.assertEqual(len(block.p.mgFlux), 0)
+ @patch("armi.reactor.composites.ArmiObject.getMaxParam")
+ def test_updateCycleDoseParams(self, mockGetMaxParam):
+ # set up situation
+ mockGetMaxParam.return_value = 1.23
+ o, r = test_reactors.loadTestReactor(
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
+ applyDummyFlux(r)
+ r.core.lib = isotxs.readBinary(ISOAA_PATH)
+ r.p.timeNode = 1
+ r.p.cycleLength = 365
+ opts = globalFluxInterface.GlobalFluxOptions("test_updateCycleDoseParams")
+ opts.fromUserSettings(o.cs)
+ opts.aclpDoseLimit = 100
+
+ # build the mapper
+ mapper = globalFluxInterface.DoseResultsMapper(1000, opts)
+ mapper.r = r
+
+ # test the starting position
+ self.assertEqual(mapper.r.core.p.elevationOfACLP3Cycles, 0)
+ self.assertEqual(mapper.r.core.p.elevationOfACLP7Cycles, 0)
+ self.assertEqual(mapper.r.core.p.dpaFullWidthHalfMax, 0)
+
+ # test the logs, as this case won't change the param values
+ with mockRunLogs.BufferLog() as mock:
+ self.assertEqual("", mock.getStdout())
+ runLog.LOG.startLog("test_updateCycleDoseParams")
+ runLog.LOG.setVerbosity(logging.INFO)
+ mapper.updateCycleDoseParams()
+ stdOut = mock.getStdout()
+ somethingStrange = "Something strange with detailedDpaThisCycle"
+ self.assertIn(somethingStrange, stdOut)
+ self.assertEqual(stdOut.count(somethingStrange), 3)
+
+ # test the ending position
+ self.assertEqual(mapper.r.core.p.elevationOfACLP3Cycles, 0)
+ self.assertEqual(mapper.r.core.p.elevationOfACLP7Cycles, 0)
+ self.assertEqual(mapper.r.core.p.dpaFullWidthHalfMax, 0)
+
+ def test_updateLoadpadDose(self):
+ # init test reactor
+ o, r = test_reactors.loadTestReactor(
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
+
+ # init options
+ opts = globalFluxInterface.GlobalFluxOptions("test_updateLoadpadDose")
+ opts.fromUserSettings(o.cs)
+ opts.aclpDoseLimit = 100
+ opts.loadPadElevation = 12
+ opts.loadPadLength = 24
+
+ # init mapper
+ mapper = globalFluxInterface.DoseResultsMapper(1000, opts)
+ mapper.r = r
+
+ # test logging from updateLoadpadDose
+ with mockRunLogs.BufferLog() as mock:
+ self.assertEqual("", mock.getStdout())
+ runLog.LOG.startLog("test_updateLoadpadDose")
+ runLog.LOG.setVerbosity(logging.INFO)
+
+ mapper.updateLoadpadDose()
+ self.assertIn("Above-core load", mock.getStdout())
+ self.assertIn("The peak ACLP dose", mock.getStdout())
+ self.assertIn("The max avg", mock.getStdout())
+
def test_getDpaXs(self):
cs = settings.Settings()
mapper = globalFluxInterface.GlobalFluxResultMapper(cs=cs)
@@ -428,13 +528,11 @@ def test_calcReactionRates(self):
.. test:: Return the reaction rates for a given ArmiObject.
:id: T_ARMI_FLUX_RX_RATES
:tests: R_ARMI_FLUX_RX_RATES
-
- .. warning:: This does not validate the reaction rate calculation.
"""
b = test_blocks.loadTestBlock()
test_blocks.applyDummyData(b)
- self.assertEqual(b.p.rateAbs, 0.0)
- globalFluxInterface.calcReactionRates(b, 1.01, b.r.core.lib)
+ self.assertAlmostEqual(b.p.rateAbs, 0.0)
+ globalFluxInterface.calcReactionRates(b, 1.01, b.core.lib)
self.assertGreater(b.p.rateAbs, 0.0)
vfrac = b.getComponentAreaFrac(Flags.FUEL)
self.assertEqual(b.p.fisDens, b.p.rateFis / vfrac)
@@ -442,7 +540,7 @@ def test_calcReactionRates(self):
def applyDummyFlux(r, ng=33):
- """Set arbitrary flux distribution on reactor."""
+ """Set arbitrary flux distribution on a Reactor."""
for b in r.core.getBlocks():
b.p.power = 1.0
- b.p.mgFlux = numpy.arange(ng, dtype=numpy.float64)
+ b.p.mgFlux = np.arange(ng, dtype=np.float64)
diff --git a/armi/physics/neutronics/isotopicDepletion/crossSectionTable.py b/armi/physics/neutronics/isotopicDepletion/crossSectionTable.py
index 4120de81f..a7921e3bc 100644
--- a/armi/physics/neutronics/isotopicDepletion/crossSectionTable.py
+++ b/armi/physics/neutronics/isotopicDepletion/crossSectionTable.py
@@ -24,7 +24,7 @@
import collections
from typing import List
-import numpy
+import numpy as np
from armi.nucDirectory import nucDir
@@ -106,7 +106,7 @@ def addMultiGroupXS(self, nucName, microMultiGroupXS, mgFlux, totalFlux=None):
microMultiGroupXS.np,
)
- oneGroupXS = numpy.asarray(mgCrossSections).dot(mgFlux) / totalFlux
+ oneGroupXS = np.asarray(mgCrossSections).dot(mgFlux) / totalFlux
oneGroupXSbyName = {xsType: xs for xsType, xs in zip(xsTypes, oneGroupXS)}
oneGroupXSbyName["n3n"] = 0.0
diff --git a/armi/physics/neutronics/isotopicDepletion/isotopicDepletionInterface.py b/armi/physics/neutronics/isotopicDepletion/isotopicDepletionInterface.py
index 57a591d7e..82be2af18 100644
--- a/armi/physics/neutronics/isotopicDepletion/isotopicDepletionInterface.py
+++ b/armi/physics/neutronics/isotopicDepletion/isotopicDepletionInterface.py
@@ -69,7 +69,7 @@ def isDepletable(obj: composites.ArmiObject):
class AbstractIsotopicDepleter:
- r"""
+ """
Interact with a depletion code.
This interface and subClasses deplete under a flux defined outside this
@@ -123,7 +123,7 @@ def getToDeplete(self):
return list(self._depleteByName.values())
def run(self):
- r"""
+ """
Submit depletion case with external solver to the cluster.
In addition to running the physics kernel, this method calls the waitForJob method
@@ -154,14 +154,12 @@ def makeXsecTable(
a list of the nucNames of active isotopes
isotxs: isotxs object
headerFormat: string (optional)
- this is the format in which the elements of the header with be returned
- -- i.e. if you use a .format() call with the case name you'll return a
- formatted list of string elements
+        this is the format in which the elements of the header will be returned -- i.e. if you use a
+        .format() call with the case name you'll return a formatted list of string elements
tableFormat: string (optional)
- this is the format in which the elements of the table with be returned
- -- i.e. if you use a .format() call with mcnpId, nG, nF, n2n, n3n, nA,
- and nP you'll get the format you want. If you use a .format() call with the case name you'll return a
- formatted list of string elements
+        This is the format in which the elements of the table will be returned -- i.e. if you use a
+ .format() call with mcnpId, nG, nF, n2n, n3n, nA, and nP you'll get the format you want. If
+ you use a .format() call with the case name you'll return a formatted list of strings.
Results
-------
@@ -196,10 +194,12 @@ def makeXsecTable(
class AbstractIsotopicDepletionReader(interfaces.OutputReader):
- r"""Read number density output produced by the isotopic depletion."""
+ """Read number density output produced by the isotopic depletion."""
def read(self):
- r"""Read a isotopic depletion Output File and applies results to armi objects in the ``ToDepletion`` attribute."""
+ """Read a isotopic depletion Output File and applies results to armi objects in the
+ ``ToDepletion`` attribute.
+ """
raise NotImplementedError
@@ -209,8 +209,8 @@ class Csrc:
Notes
-----
- The chemical vector is a dictionary of chemicals and their removal rate
- constant -- this works like a decay constant.
+ The chemical vector is a dictionary of chemicals and their removal rate constant -- this works
+ like a decay constant.
The isotopic vector is used to make a source material in continuous source definitions.
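
A hedged illustration of the `headerFormat`/`tableFormat` contract described in the `makeXsecTable` docstring; the placeholder format strings below are assumptions for illustration, not the real defaults:

```python
# hypothetical formats; the real defaults live in makeXsecTable's signature
headerFormat = "c *** {} one-group cross sections ***"
tableFormat = "m {mcnpId} nG={nG:.4e} nF={nF:.4e} n2n={n2n:.4e}"

header = headerFormat.format("myCase")
row = tableFormat.format(mcnpId="92235.80c", nG=1.2e-2, nF=4.5e-2, n2n=3.0e-4)
```
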
diff --git a/armi/physics/neutronics/latticePhysics/latticePhysicsInterface.py b/armi/physics/neutronics/latticePhysics/latticePhysicsInterface.py
index f03c21af5..d1782546e 100644
--- a/armi/physics/neutronics/latticePhysics/latticePhysicsInterface.py
+++ b/armi/physics/neutronics/latticePhysics/latticePhysicsInterface.py
@@ -20,9 +20,9 @@
import os
import shutil
+from armi import interfaces
from armi import nuclearDataIO
-from armi import interfaces, runLog
-from armi.utils import codeTiming
+from armi import runLog
from armi.physics import neutronics
from armi.physics.neutronics.const import CONF_CROSS_SECTION
from armi.physics.neutronics.settings import (
@@ -32,26 +32,20 @@
CONF_XS_KERNEL,
CONF_LATTICE_PHYSICS_FREQUENCY,
)
-from armi.utils.customExceptions import important
from armi.physics.neutronics import LatticePhysicsFrequency
-
+from armi.utils import codeTiming
LATTICE_PHYSICS = "latticePhysics"
-@important
-def SkippingXsGen_BuChangedLessThanTolerance(tolerance):
- return "Skipping XS Generation this cycle because median block burnups changes less than {}%".format(
- tolerance
- )
-
-
def setBlockNeutronVelocities(r, neutronVelocities):
"""
Set the ``mgNeutronVelocity`` parameter for each block using the ``neutronVelocities`` dictionary data.
Parameters
----------
+ r : Reactor
+        A Reactor object whose blocks will be modified.
neutronVelocities : dict
Dictionary that is keyed with the ``representativeBlock`` XS IDs with values of multigroup neutron
velocity data computed by MC2.
@@ -118,7 +112,7 @@ def interactBOC(self, cycle=0):
Notes
-----
:py:meth:`armi.physics.fuelCycle.fuelHandlerInterface.FuelHandlerInterface.interactBOC`
- also calls this if the ``runLatticePhysicsBeforeShuffling``setting is True.
+ also calls this if the ``runLatticePhysicsBeforeShuffling`` setting is True.
This happens because branch searches may need XS.
"""
if self._latticePhysicsFrequency == LatticePhysicsFrequency.BOC:
@@ -504,9 +498,6 @@ def _checkBurnupThresholds(self, blockList):
"Recalculating Cross-sections".format(xsID, buOld, buNow)
)
- if not idsChangedBurnup:
- SkippingXsGen_BuChangedLessThanTolerance(self._burnupTolerance)
-
return idsChangedBurnup
def _getProcessesPerNode(self):
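
A sketch of what `setBlockNeutronVelocities` does with that dictionary, per its docstring: each block looks up its XS ID via `getMicroSuffix` and receives the matching velocities. Minimal stand-in classes are used here in place of real ARMI Blocks:

```python
# minimal stand-ins to show the mapping; the real function walks r.core.getBlocks()
class _Params:
    mgNeutronVelocity = None

class _Block:
    def __init__(self, xsID):
        self._xsID = xsID
        self.p = _Params()

    def getMicroSuffix(self):
        return self._xsID

neutronVelocities = {"AA": [1.0e7, 5.0e6], "AB": [9.0e6, 4.0e6]}  # cm/s per group
blocks = [_Block("AA"), _Block("AB")]
for b in blocks:
    b.p.mgNeutronVelocity = neutronVelocities[b.getMicroSuffix()]
```
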
diff --git a/armi/physics/neutronics/latticePhysics/latticePhysicsWriter.py b/armi/physics/neutronics/latticePhysics/latticePhysicsWriter.py
index 5d1faf6b5..85c2fe3ef 100644
--- a/armi/physics/neutronics/latticePhysics/latticePhysicsWriter.py
+++ b/armi/physics/neutronics/latticePhysics/latticePhysicsWriter.py
@@ -24,7 +24,7 @@
import math
import collections
-import numpy
+import numpy as np
import ordered_set
from armi import runLog
@@ -351,9 +351,7 @@ def _getFuelTemperature(self):
if not fuelComponents:
fuelTemperatureInC = self.block.getAverageTempInC()
else:
- fuelTemperatureInC = numpy.mean(
- [fc.temperatureInC for fc in fuelComponents]
- )
+ fuelTemperatureInC = np.mean([fc.temperatureInC for fc in fuelComponents])
if not fuelTemperatureInC or math.isnan(fuelTemperatureInC):
raise ValueError(
"The fuel temperature of block {0} is {1} and is not valid".format(
diff --git a/armi/physics/neutronics/latticePhysics/tests/test_latticeInterface.py b/armi/physics/neutronics/latticePhysics/tests/test_latticeInterface.py
index b88c8cf58..c8b6ed329 100644
--- a/armi/physics/neutronics/latticePhysics/tests/test_latticeInterface.py
+++ b/armi/physics/neutronics/latticePhysics/tests/test_latticeInterface.py
@@ -65,7 +65,7 @@ def setUpClass(cls):
cls.o.r.core = Core("testCore")
# add an assembly with a single block
cls.assembly = HexAssembly("testAssembly")
- cls.assembly.spatialGrid = grids.axialUnitGrid(1)
+ cls.assembly.spatialGrid = grids.AxialGrid.fromNCells(1)
cls.assembly.spatialGrid.armiObject = cls.assembly
cls.assembly.add(buildSimpleFuelBlock())
# cls.o.r.core.add(assembly)
@@ -209,6 +209,9 @@ def test_interactAll(self):
self.latticeInterface.interactCoupled(iteration=1)
self.assertIsNone(self.o.r.core.lib)
+ def test_getSuffix(self):
+ self.assertEqual(self.latticeInterface._getSuffix(7), "")
+
class TestLatticePhysicsLibraryCreation(TestLatticePhysicsInterfaceBase):
"""Test variations of _newLibraryShouldBeCreated."""
diff --git a/armi/physics/neutronics/latticePhysics/tests/test_latticeWriter.py b/armi/physics/neutronics/latticePhysics/tests/test_latticeWriter.py
index 7c40eca1f..f7f1f8e2c 100644
--- a/armi/physics/neutronics/latticePhysics/tests/test_latticeWriter.py
+++ b/armi/physics/neutronics/latticePhysics/tests/test_latticeWriter.py
@@ -20,6 +20,9 @@
from armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import (
CONF_FP_MODEL,
)
+from armi.physics.neutronics.latticePhysics.latticePhysicsInterface import (
+ setBlockNeutronVelocities,
+)
from armi.physics.neutronics.latticePhysics.latticePhysicsWriter import (
LatticePhysicsWriter,
)
@@ -66,6 +69,13 @@ def setUp(self):
self.block = self.r.core.getFirstBlock()
self.w = FakeLatticePhysicsWriter(self.block, self.r, self.o)
+ def test_setBlockNeutronVelocities(self):
+ d = defaultdict(float)
+ d["AA"] = 10.0
+ setBlockNeutronVelocities(self.r, d)
+ tot = sum([b.p.mgNeutronVelocity for b in self.r.core.getBlocks()])
+ self.assertGreater(tot, 3000.0)
+
def test_latticePhysicsWriter(self):
"""Super basic test of the LatticePhysicsWriter."""
self.assertEqual(self.w.xsId, "AA")
diff --git a/armi/physics/neutronics/parameters.py b/armi/physics/neutronics/parameters.py
index 269598980..ef61fd680 100644
--- a/armi/physics/neutronics/parameters.py
+++ b/armi/physics/neutronics/parameters.py
@@ -15,8 +15,8 @@
"""
Parameter definitions for the Neutronics Plugin.
-We hope neutronics plugins that compute flux will use ``mgFlux``, etc.,
-which will enable modular construction of apps.
+We hope neutronics plugins that compute flux will use ``mgFlux``, etc., which will enable modular
+construction of apps.
"""
from armi.physics.neutronics.settings import CONF_DPA_PER_FLUENCE
from armi.reactor import parameters
@@ -86,7 +86,7 @@ def _getNeutronicsBlockParams():
pb.defParam(
"mgFluxGamma",
- units=f"{units.GRAMS}*{units.CM}/{units.SECONDS}",
+ units=f"#*{units.CM}/{units.SECONDS}",
description="multigroup gamma flux",
location=ParamLocation.VOLUME_INTEGRATED,
saveToDB=True,
@@ -110,7 +110,7 @@ def _getNeutronicsBlockParams():
pb.defParam(
"extSrc",
- units=f"{units.GRAMS}/{units.CM}^3/{units.SECONDS}",
+ units=f"#/{units.CM}^3/{units.SECONDS}",
description="multigroup external source",
location=ParamLocation.AVERAGE,
saveToDB=False,
@@ -120,7 +120,7 @@ def _getNeutronicsBlockParams():
pb.defParam(
"mgGammaSrc",
- units=f"{units.GRAMS}/{units.CM}^3/{units.SECONDS}",
+ units=f"#/{units.CM}^3/{units.SECONDS}",
description="multigroup gamma source",
location=ParamLocation.AVERAGE,
saveToDB=True,
@@ -133,7 +133,7 @@ def _getNeutronicsBlockParams():
pb.defParam(
"gammaSrc",
- units=f"{units.GRAMS}/{units.CM}^3/{units.SECONDS}",
+ units=f"#/{units.CM}^3/{units.SECONDS}",
description="gamma source",
location=ParamLocation.AVERAGE,
saveToDB=True,
@@ -144,7 +144,10 @@ def _getNeutronicsBlockParams():
pb.defParam(
"mgFluxSK",
units=f"n*{units.CM}/{units.SECONDS}",
- description="multigroup volume-integrated flux stored for multiple time steps in spatial kinetics (2-D array)",
+ description=(
+ "multigroup volume-integrated flux stored for multiple time steps in "
+ "spatial kinetics (2-D array)"
+ ),
location=ParamLocation.VOLUME_INTEGRATED,
saveToDB=False,
categories=[
@@ -160,9 +163,9 @@ def _getNeutronicsBlockParams():
"pinMgFluxes",
units=f"n/{units.CM}^2/{units.SECONDS}",
description="""
- The block-level pin multigroup fluxes. pinMgFluxes[g][i] represents the flux in group g for pin i. Flux
- units are the standard n/cm^2/s. The "ARMI pin ordering" is used, which is counter-clockwise from 3
- o'clock.
+ The block-level pin multigroup fluxes. pinMgFluxes[g][i] represents the flux in group g
+ for pin i. Flux units are the standard n/cm^2/s. The "ARMI pin ordering" is used, which
+ is counter-clockwise from 3 o'clock.
""",
categories=[parameters.Category.pinQuantities],
saveToDB=True,
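As a reading aid for the ordering convention described above, a hedged indexing sketch (the array shape is illustrative, not from this change):

```python
import numpy as np

# pinMgFluxes is indexed [group][pin]; pin 0 sits at 3 o'clock and the
# pin index increases counter-clockwise, per the "ARMI pin ordering".
pinMgFluxes = np.zeros((33, 169))  # 33 groups x 169 pins, illustrative
fluxGroup0Pin0 = pinMgFluxes[0][0]  # n/cm^2/s
```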
@@ -180,7 +183,7 @@ def _getNeutronicsBlockParams():
pb.defParam(
"pinMgFluxesGamma",
- units=f"{units.GRAMS}/{units.CM}^2/{units.SECONDS}",
+ units=f"#/{units.CM}^2/{units.SECONDS}",
description="should be a blank 3-D array, but re-defined later (ng x nPins x nAxialSegments)",
categories=[parameters.Category.pinQuantities, parameters.Category.gamma],
saveToDB=False,
@@ -248,12 +251,13 @@ def _getNeutronicsBlockParams():
"linPow",
units=f"{units.WATTS}/{units.METERS}",
description=(
- "Pin-averaged linear heat rate, which is calculated by evaluating the block power and dividing "
- "by the number of pins. If gamma transport is enabled, then this represents the combined "
- "neutron and gamma heating. If gamma transport is disabled then this represents the energy "
- "generation in the pin, where gammas are assumed to deposit their energy locally. Note that this "
- "value does not implicitly account for axial and radial peaking factors within the block. Use `linPowByPin` "
- "for obtaining the pin linear heat rate with peaking factors included."
+ "Pin-averaged linear heat rate, which is calculated by evaluating the block power "
+ "and dividing by the number of pins. If gamma transport is enabled, then this "
+ "represents the combined neutron and gamma heating. If gamma transport is disabled "
+ "then this represents the energy generation in the pin, where gammas are assumed to "
+ "deposit their energy locally. Note that this value does not implicitly account "
+ "for axial and radial peaking factors within the block. Use `linPowByPin` for "
+ "obtaining the pin linear heat rate with peaking factors included."
),
location=ParamLocation.AVERAGE,
default=0.0,
@@ -270,9 +274,9 @@ def _getNeutronicsBlockParams():
description=(
"Pin linear linear heat rate, which is calculated through flux reconstruction and "
"accounts for axial and radial peaking factors. This differs from the `linPow` "
- "parameter, which assumes no axial and radial peaking in the block as this information "
- "is unavailable without detailed flux reconstruction. The same application of neutron and gamma "
- "heating results applies."
+ "parameter, which assumes no axial and radial peaking in the block as this "
+ "information is unavailable without detailed flux reconstruction. The same "
+ "application of neutron and gamma heating results applies."
),
location=ParamLocation.CHILDREN,
categories=[parameters.Category.pinQuantities],
@@ -524,7 +528,7 @@ def _getNeutronicsBlockParams():
pb.defParam(
"fluxGamma",
- units=f"{units.GRAMS}/{units.CM}^2/{units.SECONDS}",
+ units=f"#/{units.CM}^2/{units.SECONDS}",
description="Gamma scalar flux",
categories=[
parameters.Category.retainOnReplacement,
@@ -673,7 +677,10 @@ def _getNeutronicsBlockParams():
"detailedDpaThisCycle",
units=units.DPA,
location=ParamLocation.AVERAGE,
- description="Displacement per atom accumulated during this cycle. This accumulates over a cycle and resets to zero at BOC.",
+ description=(
+ "Displacement per atom accumulated during this cycle. This accumulates "
+ "over a cycle and resets to zero at BOC."
+ ),
categories=[
parameters.Category.cumulativeOverCycle,
parameters.Category.detailedAxialExpansion,
@@ -691,7 +698,10 @@ def _getNeutronicsBlockParams():
pb.defParam(
"dpaPeakFromFluence",
units=units.DPA,
- description=f"DPA approximation based on a fluence conversion factor set in the {CONF_DPA_PER_FLUENCE} setting",
+ description=(
+ "DPA approximation based on a fluence conversion factor set in the "
+ f"{CONF_DPA_PER_FLUENCE} setting"
+ ),
location=ParamLocation.MAX,
categories=[
parameters.Category.cumulative,
@@ -724,7 +734,10 @@ def _getNeutronicsBlockParams():
pb.defParam(
"pdensGenerated",
units=f"{units.WATTS}/{units.CM}^3",
- description="Volume-averaged generated power density. Different than b.p.pdens only when gamma transport is activated.",
+ description=(
+ "Volume-averaged generated power density. Different than b.p.pdens only "
+ "when gamma transport is activated."
+ ),
location=ParamLocation.AVERAGE,
categories=[parameters.Category.gamma],
)
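For readers skimming these description rewraps, the parameter-builder pattern used throughout this file looks roughly like the following hedged sketch (the parameter name and values are hypothetical, not part of this change):

```python
from armi.reactor import parameters
from armi.reactor.parameters import ParamLocation
from armi.utils import units

pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
    pb.defParam(
        "myIllustrativeParam",  # hypothetical name
        units=f"#/{units.CM}^3/{units.SECONDS}",
        description="An illustrative multigroup photon-count density.",
        location=ParamLocation.AVERAGE,
        saveToDB=False,
    )
```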
diff --git a/armi/physics/neutronics/settings.py b/armi/physics/neutronics/settings.py
index fcbb49c12..f9e23cabc 100644
--- a/armi/physics/neutronics/settings.py
+++ b/armi/physics/neutronics/settings.py
@@ -16,11 +16,11 @@
import os
from armi import runLog
-from armi.operators import settingsValidation
+from armi.physics.neutronics import LatticePhysicsFrequency
from armi.physics.neutronics.const import NEUTRON
from armi.physics.neutronics.energyGroups import GROUP_STRUCTURE
-from armi.physics.neutronics import LatticePhysicsFrequency
from armi.settings import setting
+from armi.settings import settingsValidation
from armi.utils import directoryChangers
from armi.settings.fwSettings.globalSettings import (
CONF_DETAILED_AXIAL_EXPANSION,
diff --git a/armi/physics/neutronics/tests/test_crossSectionManager.py b/armi/physics/neutronics/tests/test_crossSectionManager.py
index dc4f80379..80b5cac04 100644
--- a/armi/physics/neutronics/tests/test_crossSectionManager.py
+++ b/armi/physics/neutronics/tests/test_crossSectionManager.py
@@ -17,10 +17,10 @@
:py:mod:`armi.physics.neutronics.crossSectionGroupManager`
"""
-from io import BytesIO
import copy
import os
import unittest
+from io import BytesIO
from unittest.mock import MagicMock
from six.moves import cPickle
@@ -29,35 +29,34 @@
from armi.physics.neutronics import crossSectionGroupManager
from armi.physics.neutronics.const import CONF_CROSS_SECTION
from armi.physics.neutronics.crossSectionGroupManager import (
+ AverageBlockCollection,
BlockCollection,
+ CrossSectionGroupManager,
FluxWeightedAverageBlockCollection,
-)
-from armi.physics.neutronics.crossSectionGroupManager import (
MedianBlockCollection,
- AverageBlockCollection,
)
-from armi.physics.neutronics.crossSectionGroupManager import CrossSectionGroupManager
+from armi.physics.neutronics.crossSectionSettings import XSModelingOptions
from armi.physics.neutronics.fissionProductModel.tests import test_lumpedFissionProduct
from armi.physics.neutronics.settings import (
- CONF_XS_BLOCK_REPRESENTATION,
CONF_LATTICE_PHYSICS_FREQUENCY,
+ CONF_XS_BLOCK_REPRESENTATION,
)
from armi.reactor.blocks import HexBlock
from armi.reactor.flags import Flags
-from armi.reactor.tests import test_reactors, test_blocks
-from armi.tests import TEST_ROOT
-from armi.tests import mockRunLogs
+from armi.reactor.tests import test_blocks, test_reactors
+from armi.tests import TEST_ROOT, mockRunLogs
from armi.utils import units
from armi.utils.directoryChangers import TemporaryDirectoryChanger
-
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class TestBlockCollection(unittest.TestCase):
def setUp(self):
self.blockList = makeBlocks()
- self.bc = BlockCollection(self.blockList[0].r.blueprints.allNuclidesInProblem)
+ self.bc = BlockCollection(
+ self.blockList[0].core.r.blueprints.allNuclidesInProblem
+ )
self.bc.extend(self.blockList)
def test_add(self):
@@ -88,7 +87,7 @@ def setUp(self):
b.p.percentBu = bi / 4.0 * 100
self.blockList[0], self.blockList[2] = self.blockList[2], self.blockList[0]
self.bc = MedianBlockCollection(
- self.blockList[0].r.blueprints.allNuclidesInProblem
+ self.blockList[0].core.r.blueprints.allNuclidesInProblem
)
self.bc.extend(self.blockList)
@@ -125,7 +124,7 @@ def setUpClass(cls):
def setUp(self):
self.bc = AverageBlockCollection(
- self.blockList[0].r.blueprints.allNuclidesInProblem
+ self.blockList[0].core.r.blueprints.allNuclidesInProblem
)
self.bc.extend(self.blockList)
self.bc.averageByComponent = True
@@ -160,7 +159,7 @@ def test_createRepresentativeBlock(self):
# check that a new block collection of the representative block has right temperatures
# this is required for Doppler coefficient calculations
newBc = AverageBlockCollection(
- self.blockList[0].r.blueprints.allNuclidesInProblem
+ self.blockList[0].core.r.blueprints.allNuclidesInProblem
)
newBc.append(avgB)
newBc.calcAvgNuclideTemperatures()
@@ -199,7 +198,7 @@ def test_createRepresentativeBlockDissimilar(self):
# U35 has different average temperature because blocks have different U235 content
newBc = AverageBlockCollection(
- self.blockList[0].r.blueprints.allNuclidesInProblem
+ self.blockList[0].core.r.blueprints.allNuclidesInProblem
)
newBc.append(avgB)
newBc.calcAvgNuclideTemperatures()
@@ -251,14 +250,15 @@ def setUpClass(cls):
def setUp(self):
self.bc = AverageBlockCollection(
- self.blockList[0].r.blueprints.allNuclidesInProblem
+ self.blockList[0].core.r.blueprints.allNuclidesInProblem
)
blockCopies = [copy.deepcopy(b) for b in self.blockList]
self.bc.extend(blockCopies)
def test_getAverageComponentNumberDensities(self):
"""Test component number density averaging."""
- # becaue of the way densities are set up, the middle block (index 1 of 0-2) component densities are equivalent to the average
+ # because of the way densities are set up, the middle block (index 1 of 0-2) component
+ # densities are equivalent to the average
b = self.bc[1]
for compIndex, c in enumerate(b.getComponents()):
avgDensities = self.bc._getAverageComponentNumberDensities(compIndex)
@@ -320,7 +320,7 @@ def test_getAverageComponentTemperatureNoMass(self):
class TestBlockCollectionComponentAverage(unittest.TestCase):
- r"""tests for ZPPR 1D XS gen cases."""
+ """Tests for ZPPR 1D XS gen cases."""
def setUp(self):
r"""
@@ -674,7 +674,7 @@ def setUpClass(cls):
def setUp(self):
self.bc = FluxWeightedAverageBlockCollection(
- self.blockList[0].r.blueprints.allNuclidesInProblem
+ self.blockList[0].core.r.blueprints.allNuclidesInProblem
)
self.bc.extend(self.blockList)
@@ -695,12 +695,22 @@ class TestCrossSectionGroupManager(unittest.TestCase):
def setUp(self):
cs = settings.Settings()
self.blockList = makeBlocks(20)
- self.csm = CrossSectionGroupManager(self.blockList[0].r, cs)
+ self.csm = CrossSectionGroupManager(self.blockList[0].core.r, cs)
for bi, b in enumerate(self.blockList):
b.p.percentBu = bi / 19.0 * 100
self.csm._setBuGroupBounds([3, 10, 30, 100])
self.csm.interactBOL()
+ def test_enableBuGroupUpdates(self):
+ self.csm._buGroupUpdatesEnabled = False
+ self.csm.enableBuGroupUpdates()
+ self.assertTrue(self.csm._buGroupUpdatesEnabled)
+
+ def test_disableBuGroupUpdates(self):
+ self.csm._buGroupUpdatesEnabled = False
+ res = self.csm.disableBuGroupUpdates()
+ self.assertFalse(res)
+
def test_updateBurnupGroups(self):
self.blockList[1].p.percentBu = 3.1
self.blockList[2].p.percentBu = 10.0
@@ -774,15 +784,14 @@ def test_getRepresentativeBlocks(self):
_o, r = test_reactors.loadTestReactor(TEST_ROOT)
self.csm.r = r
- # Assumption: All sodium in fuel blocks for this test is 450 C and this is the
- # expected sodium temperature.
- # These lines of code take the first sodium block and decrease the temperature of the block,
- # but change the atom density to approximately zero.
- # Checking later on the nuclide temperature of sodium is asserted to be still 450.
- # This perturbation proves that altering the temperature of an component with near zero atom density
- # does not affect the average temperature of the block collection.
- # This demonstrates that the temperatures of a block collection are atom weighted rather than just the
- # average temperature.
+ # Assumption: All sodium in fuel blocks for this test is 450 C, and this is the expected
+ # sodium temperature. These lines of code take the first sodium block, decrease its
+ # temperature, and change its atom density to approximately zero. Later, the nuclide
+ # temperature of sodium is asserted to still be 450. This perturbation proves that
+ # altering the temperature of a component with near-zero atom density does not affect
+ # the average temperature of the block collection, demonstrating that block collection
+ # temperatures are atom weighted rather than simple averages.
regularFuel = r.core.getFirstBlock(Flags.FUEL, exact=True)
intercoolant = regularFuel.getComponent(Flags.INTERCOOLANT)
intercoolant.setTemperature(100) # just above melting
@@ -817,16 +826,29 @@ def test_getRepresentativeBlocks(self):
self.assertIsNone(blocks[0].p.detailedNDens)
self.assertIsNone(blocks[1].p.detailedNDens)
- def test_createRepresentativeBlocksUsingExistingBlocks(self):
- """
- Demonstrates that a new representative block can be generated from an existing representative block.
-
- Notes
- -----
- This tests that the XS ID of the new representative block is correct and that the compositions are identical
- between the original and the new representative blocks.
- """
- _o, r = test_reactors.loadTestReactor(TEST_ROOT)
+ def _createRepresentativeBlocksUsingExistingBlocks(self, validBlockTypes):
+ """Reusable code used in multiple unit tests."""
+ o, r = test_reactors.loadTestReactor(
+ TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
+ # set a few random non-default settings on AA to be copied to the new BA group
+ o.cs[CONF_CROSS_SECTION].update(
+ {
+ "AA": XSModelingOptions(
+ "AA",
+ geometry="0D",
+ averageByComponent=True,
+ xsMaxAtomNumber=60,
+ criticalBuckling=False,
+ xsPriority=2,
+ )
+ }
+ )
+ o.cs[CONF_CROSS_SECTION].setDefaults(
+ crossSectionGroupManager.AVERAGE_BLOCK_COLLECTION, validBlockTypes
+ )
+ aaSettings = o.cs[CONF_CROSS_SECTION]["AA"]
+ self.csm.cs = copy.deepcopy(o.cs)
self.csm.createRepresentativeBlocks()
unperturbedReprBlocks = copy.deepcopy(self.csm.representativeBlocks)
self.assertNotIn("BA", unperturbedReprBlocks)
@@ -849,6 +871,38 @@ def test_createRepresentativeBlocksUsingExistingBlocks(self):
)
self.assertEqual(origXSIDsFromNew["BA"], "AA")
+ # check that settings were copied correctly
+ baSettings = self.csm.cs[CONF_CROSS_SECTION]["BA"]
+ self.assertEqual(baSettings.xsID, "BA")
+ for setting, baSettingValue in baSettings.__dict__.items():
+ if setting == "xsID":
+ continue
+ self.assertEqual(baSettingValue, aaSettings.__dict__[setting])
+
+ def test_createRepresentativeBlocksUsingExistingBlocks(self):
+ """
+ Demonstrates that a new representative block can be generated from an existing
+ representative block.
+
+ Notes
+ -----
+ This tests that the XS ID of the new representative block is correct and that the
+ compositions are identical between the original and the new representative blocks.
+ """
+ self._createRepresentativeBlocksUsingExistingBlocks(["fuel"])
+
+ def test_createRepresentativeBlocksUsingExistingBlocksDisableValidBlockTypes(self):
+ """
+ Demonstrates that a new representative block can be generated from an existing
+ representative block with the setting `disableBlockTypeExclusionInXsGeneration: true`.
+
+ Notes
+ -----
+ This tests that the XS ID of the new representative block is correct and that the
+ compositions are identical between the original and the new representative blocks.
+ """
+ self._createRepresentativeBlocksUsingExistingBlocks(True)
+
def test_interactBOL(self):
"""Test `BOL` lattice physics update frequency.
@@ -857,7 +911,7 @@ def test_interactBOL(self):
:tests: R_ARMI_XSGM_FREQ
"""
self.assertFalse(self.csm.representativeBlocks)
- self.blockList[0].r.p.timeNode = 0
+ self.blockList[0].core.r.p.timeNode = 0
self.csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = "BOL"
self.csm.interactBOL()
self.assertTrue(self.csm.representativeBlocks)
@@ -870,7 +924,7 @@ def test_interactBOC(self):
:tests: R_ARMI_XSGM_FREQ
"""
self.assertFalse(self.csm.representativeBlocks)
- self.blockList[0].r.p.timeNode = 0
+ self.blockList[0].core.r.p.timeNode = 0
self.csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = "BOC"
self.csm.interactBOL()
self.csm.interactBOC()
@@ -879,7 +933,8 @@ def test_interactBOC(self):
def test_interactEveryNode(self):
"""Test `everyNode` lattice physics update frequency.
- .. test:: The cross-section group manager frequency depends on the LPI frequency at every time node.
+ .. test:: The cross-section group manager frequency depends on the LPI frequency at every
+ time node.
:id: T_ARMI_XSGM_FREQ2
:tests: R_ARMI_XSGM_FREQ
"""
@@ -895,7 +950,8 @@ def test_interactEveryNode(self):
def test_interactFirstCoupledIteration(self):
"""Test `firstCoupledIteration` lattice physics update frequency.
- .. test:: The cross-section group manager frequency depends on the LPI frequency during first coupled iteration.
+ .. test:: The cross-section group manager frequency depends on the LPI frequency during
+ first coupled iteration.
:id: T_ARMI_XSGM_FREQ3
:tests: R_ARMI_XSGM_FREQ
"""
@@ -937,10 +993,12 @@ def test_xsgmIsRunBeforeXS(self):
def test_copyPregeneratedFiles(self):
"""
- Tests copying pre-generated cross section and flux files
- using reactor that is built from a case settings file.
+ Tests copying pre-generated cross section and flux files using reactor that is built from a
+ case settings file.
"""
- o, r = test_reactors.loadTestReactor(TEST_ROOT)
+ o, r = test_reactors.loadTestReactor(
+ TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
# Need to overwrite the relative paths with absolute
o.cs[CONF_CROSS_SECTION]["XA"].xsFileLocation = [
os.path.join(THIS_DIR, "ISOXA")
@@ -973,6 +1031,5 @@ def test_conversion_2digit(self):
def makeBlocks(howMany=20):
_o, r = test_reactors.loadTestReactor(TEST_ROOT)
- return r.core.getBlocks(Flags.FUEL)[
- 3 : howMany + 3
- ] # shift y 3 to skip central assemblies 1/3 volume
+ # shift y 3 to skip central assemblies 1/3 volume
+ return r.core.getBlocks(Flags.FUEL)[3 : howMany + 3]
diff --git a/armi/physics/neutronics/tests/test_crossSectionTable.py b/armi/physics/neutronics/tests/test_crossSectionTable.py
index b77387606..8dfc878b8 100644
--- a/armi/physics/neutronics/tests/test_crossSectionTable.py
+++ b/armi/physics/neutronics/tests/test_crossSectionTable.py
@@ -20,7 +20,6 @@
isotopicDepletionInterface as idi,
)
from armi.physics.neutronics.latticePhysics import ORDER
-from armi.reactor.flags import Flags
from armi.reactor.tests.test_blocks import loadTestBlock
from armi.reactor.tests.test_reactors import loadTestReactor
from armi.settings import Settings
@@ -37,7 +36,7 @@ def test_makeTable(self):
"""
obj = loadTestBlock()
obj.p.mgFlux = range(33)
- core = obj.getAncestorWithFlags(Flags.CORE)
+ core = obj.parent.parent
core.lib = isotxs.readBinary(ISOAA_PATH)
table = crossSectionTable.makeReactionRateTable(obj)
@@ -60,7 +59,9 @@ def test_isotopicDepletionInterface(self):
:id: T_ARMI_DEPL_ABC
:tests: R_ARMI_DEPL_ABC
"""
- _o, r = loadTestReactor()
+ _o, r = loadTestReactor(
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
cs = Settings()
aid = idi.AbstractIsotopicDepleter(r, cs)
diff --git a/armi/physics/neutronics/tests/test_macroXSGenerationInterface.py b/armi/physics/neutronics/tests/test_macroXSGenerationInterface.py
index 9214916a5..6131c17a8 100644
--- a/armi/physics/neutronics/tests/test_macroXSGenerationInterface.py
+++ b/armi/physics/neutronics/tests/test_macroXSGenerationInterface.py
@@ -20,7 +20,7 @@
from armi.physics.neutronics.macroXSGenerationInterface import (
MacroXSGenerationInterface,
)
-from armi.reactor.tests.test_reactors import loadTestReactor, reduceTestReactorRings
+from armi.reactor.tests.test_reactors import loadTestReactor
from armi.settings import Settings
from armi.tests import ISOAA_PATH
@@ -34,8 +34,9 @@ def test_macroXSGenerationInterfaceBasics(self):
:tests: R_ARMI_MACRO_XS
"""
cs = Settings()
- _o, r = loadTestReactor()
- reduceTestReactorRings(r, cs, 2)
+ _o, r = loadTestReactor(
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
# Before: verify there are no macro XS on each block
for b in r.core.getBlocks():
diff --git a/armi/physics/neutronics/tests/test_neutronicsPlugin.py b/armi/physics/neutronics/tests/test_neutronicsPlugin.py
index d10c510af..00c855ada 100644
--- a/armi/physics/neutronics/tests/test_neutronicsPlugin.py
+++ b/armi/physics/neutronics/tests/test_neutronicsPlugin.py
@@ -19,7 +19,6 @@
from ruamel.yaml import YAML
from armi import getPluginManagerOrFail, settings, tests
-from armi.operators import settingsValidation
from armi.physics import neutronics
from armi.physics.neutronics.const import CONF_CROSS_SECTION
from armi.physics.neutronics.settings import (
@@ -33,8 +32,9 @@
CONF_LATTICE_PHYSICS_FREQUENCY,
getNeutronicsSettingValidators,
)
-from armi.settings.fwSettings.globalSettings import CONF_RUN_TYPE
from armi.settings import caseSettings
+from armi.settings import settingsValidation
+from armi.settings.fwSettings.globalSettings import CONF_RUN_TYPE
from armi.tests import TEST_ROOT
from armi.tests.test_plugins import TestPlugin
from armi.utils import directoryChangers
diff --git a/armi/physics/thermalHydraulics/__init__.py b/armi/physics/thermalHydraulics/__init__.py
index af9b6d853..eff87db6a 100644
--- a/armi/physics/thermalHydraulics/__init__.py
+++ b/armi/physics/thermalHydraulics/__init__.py
@@ -12,4 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Thermal Hydraulics package."""
-from .plugin import ThermalHydraulicsPlugin # noqa: unused-import
+from armi.physics.thermalHydraulics.plugin import ThermalHydraulicsPlugin # noqa: F401
diff --git a/armi/physics/thermalHydraulics/parameters.py b/armi/physics/thermalHydraulics/parameters.py
index 88e6384ec..a6af2c1ba 100644
--- a/armi/physics/thermalHydraulics/parameters.py
+++ b/armi/physics/thermalHydraulics/parameters.py
@@ -178,14 +178,16 @@ def _getBlockParams():
pb.defParam(
"THaverageCladTemp",
units=units.DEGC,
- description="The nominal average clad temperature in the block, which should be used for neutronic and TH feedback.",
+ description="The nominal average clad temperature in the block, which should be used "
+ "for neutronic and TH feedback.",
location=ParamLocation.AVERAGE,
)
pb.defParam(
"THaverageGapTemp",
units=units.DEGC,
- description="The nominal average gap temperature in the block, which should be used for neutronic and TH feedback.",
+ description="The nominal average gap temperature in the block, which should be used "
+ "for neutronic and TH feedback.",
location=ParamLocation.AVERAGE,
saveToDB=True,
)
@@ -193,7 +195,8 @@ def _getBlockParams():
pb.defParam(
"THaverageDuctTemp",
units=units.DEGC,
- description="The nominal average duct temperature in the block, which should be used for neutronic and TH feedback.",
+ description="The nominal average duct temperature in the block, which should be used "
+ "for neutronic and TH feedback.",
location=ParamLocation.AVERAGE,
)
diff --git a/armi/physics/thermalHydraulics/settings.py b/armi/physics/thermalHydraulics/settings.py
index a8df91ed2..3975a35e7 100644
--- a/armi/physics/thermalHydraulics/settings.py
+++ b/armi/physics/thermalHydraulics/settings.py
@@ -34,7 +34,7 @@ def defineSettings():
),
setting.Setting(
CONF_TH_KERNEL,
- default=False,
+ default="",
label="Thermal Hydraulics Kernel",
description="Name of primary T/H solver in this run",
),
diff --git a/armi/physics/neutronics/isotopicDepletion/__init__.py b/armi/physics/thermalHydraulics/tests/__init__.py
similarity index 63%
rename from armi/physics/neutronics/isotopicDepletion/__init__.py
rename to armi/physics/thermalHydraulics/tests/__init__.py
index 8959eccdb..84776603f 100644
--- a/armi/physics/neutronics/isotopicDepletion/__init__.py
+++ b/armi/physics/thermalHydraulics/tests/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2019 TerraPower, LLC
+# Copyright 2024 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,13 +11,3 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-"""The depletion physics package contains utility/framework code related to the physics of transmutation and decay."""
-# ruff: noqa: F401
-import os
-
-from armi import RES
-from armi.nucDirectory import nuclideBases
-from armi import interfaces
-
-ORDER = interfaces.STACK_ORDER.DEPLETION
diff --git a/armi/physics/thermalHydraulics/tests/test_thermalHydraulicsPlugin.py b/armi/physics/thermalHydraulics/tests/test_thermalHydraulicsPlugin.py
new file mode 100644
index 000000000..3c9938abc
--- /dev/null
+++ b/armi/physics/thermalHydraulics/tests/test_thermalHydraulicsPlugin.py
@@ -0,0 +1,41 @@
+# Copyright 2024 TerraPower, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for the thermal hydraulics plugin."""
+from armi.physics import thermalHydraulics
+from armi.physics.thermalHydraulics.settings import CONF_DO_TH, CONF_TH_KERNEL
+from armi.settings import caseSettings
+from armi.tests.test_plugins import TestPlugin
+
+
+class TestThermalHydraulicsPlugin(TestPlugin):
+ plugin = thermalHydraulics.ThermalHydraulicsPlugin
+
+ def test_thermalHydraulicsSettingsLoaded(self):
+ """Test that the thermal hydraulics case settings are loaded."""
+ cs = caseSettings.Settings()
+
+ self.assertIn(CONF_DO_TH, cs)
+ self.assertIn(CONF_TH_KERNEL, cs)
+
+ def test_thermalHydraulicsSettingsSet(self):
+ """Test that the thermal hydraulics case settings are applied correctly."""
+ cs = caseSettings.Settings()
+ thKernelName = "testKernel"
+
+ cs[CONF_DO_TH] = True
+ cs[CONF_TH_KERNEL] = thKernelName
+
+ self.assertTrue(cs[CONF_DO_TH])
+ self.assertEqual(cs[CONF_TH_KERNEL], thKernelName)
diff --git a/armi/plugins.py b/armi/plugins.py
index 61d0830c5..31c72a175 100644
--- a/armi/plugins.py
+++ b/armi/plugins.py
@@ -49,8 +49,7 @@
- :py:mod:`armi.settings` and their validators
- :py:mod:`armi.reactor.components` for custom geometry
- :py:mod:`armi.reactor.flags` for custom reactor components
-- :py:mod:`armi.interfaces` to define new calculation sequences and interactions with
- new codes
+- :py:mod:`armi.interfaces` to define new calculation sequences and interactions with new codes
- :py:mod:`armi.reactor.parameters` to represent new physical state on the reactor
- :py:mod:`armi.materials` for custom materials
- Elements of the :py:mod:`armi.gui`
@@ -89,34 +88,35 @@
deliberate design choice to keep the plugin system simple and to preclude a large class
of potential bugs. At some point it may make sense to revisit this.
-Other customization points
---------------------------
+**Other customization points**
+
While the Plugin API is the main place for ARMI framework customization, there are
several other areas where ARMI may be extended or customized. These typically pre-dated
the Plugin-based architecture, and as the need arises they may be migrated here.
- - Component types: Component types are registered dynamically through some metaclass
- magic, found in :py:class:`armi.reactor.components.component.ComponentType` and
- :py:class:`armi.reactor.composites.CompositeModelType`. Simply defining a new
- Component subclass should register it with the appropriate ARMI systems. While this
- is convenient, it does lead to potential issues, as the behavior of ARMI becomes
- sensitive to module import order and the like; the containing module needs to be
- imported before the registration occurs, which can be surprising.
-
- - Interface input files: Interfaces used to be discovered dynamically, rather than
- explicitly as they are now in the :py:meth:`armi.plugins.ArmiPlugin.exposeInterfaces`
- plugin hook. Essentially they functioned as ersatz plugins. One of the ways that they
- would customize ARMI behavior is through the
- :py:meth:`armi.physics.interface.Interface.specifyInputs` static method, which is
- still used to determine inter-Case dependencies and support cloning and hashing Case
- inputs. Going forward, this approach will likely be deprecated in favor of a plugin
- hook.
-
- - Fuel handler logic: The
- :py:class:`armi.physics.fuelCycle.fuelHandlers.FuelHandlerInterface` supports
- customization through the dynamic loading of fuel handler logic modules, based on
- user settings. This also predated the plugin infrastructure, and may one day be
- replaced with plugin-based fuel handler logic.
+ - Component types: Component types are registered dynamically through some metaclass
+ magic, found in :py:class:`armi.reactor.components.component.ComponentType` and
+ :py:class:`armi.reactor.composites.CompositeModelType`. Simply defining a new
+ Component subclass should register it with the appropriate ARMI systems. While this
+ is convenient, it does lead to potential issues, as the behavior of ARMI becomes
+ sensitive to module import order and the like; the containing module needs to be
+ imported before the registration occurs, which can be surprising.
+
+ - Interface input files: Interfaces used to be discovered dynamically, rather than
+ explicitly as they are now in the :py:meth:`armi.plugins.ArmiPlugin.exposeInterfaces`
+ plugin hook. Essentially they functioned as ersatz plugins. One of the ways that they
+ would customize ARMI behavior is through the
+ :py:meth:`armi.physics.interface.Interface.specifyInputs` static method, which is
+ still used to determine inter-Case dependencies and support cloning and hashing Case
+ inputs. Going forward, this approach will likely be deprecated in favor of a plugin
+ hook.
+
+ - Fuel handler logic: The
+ :py:class:`armi.physics.fuelCycle.fuelHandlers.FuelHandlerInterface` supports
+ customization through the dynamic loading of fuel handler logic modules, based on
+ user settings. This also predated the plugin infrastructure, and may one day be
+ replaced with plugin-based fuel handler logic.
+
"""
from typing import Callable, Dict, List, Union, TYPE_CHECKING
@@ -125,11 +125,9 @@
from armi import pluginManager
from armi.utils import flags
-# Not used during runtime so we could have a coverage drop here. Add the
-# pragma line to tell coverage.py to skip this
-# https://coverage.readthedocs.io/en/stable/excluding.html
-if TYPE_CHECKING: # pragma: no cover
+if TYPE_CHECKING:
from armi.reactor.composites import Composite
+ from armi.reactor.converters.axialExpansionChanger import AxialExpansionChanger
HOOKSPEC = pluggy.HookspecMarker("armi")
@@ -309,10 +307,12 @@ def defineBlockTypes() -> List:
corresponding ``Component`` type that should activate it. For instance a
``HexBlock`` would be created when the largest component is a ``Hexagon``::
+ [(Hexagon, HexBlock)]
+
Returns
-------
list
- [(Hexagon, HexBlock)]
+ ``[(compType, BlockType), ...]``
"""
@staticmethod
@@ -337,11 +337,13 @@ def defineAssemblyTypes() -> List:
Example
-------
- [
- (HexBlock, HexAssembly),
- (CartesianBlock, CartesianAssembly),
- (ThRZBlock, ThRZAssembly),
- ]
+ .. code::
+
+ [
+ (HexBlock, HexAssembly),
+ (CartesianBlock, CartesianAssembly),
+ (ThRZBlock, ThRZAssembly),
+ ]
Returns
-------
@@ -363,16 +365,16 @@ def defineBlueprintsSections() -> List:
list
(name, section, resolutionMethod) tuples, where:
- - name : The name of the attribute to add to the Blueprints class; this
- should be a valid Python identifier.
+ - name : The name of the attribute to add to the Blueprints class; this
+ should be a valid Python identifier.
- - section : An instance of ``yaml.Attribute`` defining the data that is
- described by the Blueprints section.
+ - section : An instance of ``yaml.Attribute`` defining the data that is
+ described by the Blueprints section.
- - resolutionMethod : A callable that takes a Blueprints object and case
- settings as arguments. This will be called like an unbound instance
- method on the passed Blueprints object to initialize the state of the new
- Blueprints section.
+ - resolutionMethod : A callable that takes a Blueprints object and case
+ settings as arguments. This will be called like an unbound instance
+ method on the passed Blueprints object to initialize the state of the new
+ Blueprints section.
Notes
-----
@@ -661,6 +663,45 @@ def defineSystemBuilders() -> Dict[str, Callable[[str], "Composite"]]:
and a ``"sfp"`` lookup, triggered to run after all other hooks have been run.
"""
+ @staticmethod
+ @HOOKSPEC(firstresult=True)
+ def getAxialExpansionChanger() -> type["AxialExpansionChanger"]:
+ """Produce the class responsible for performing axial expansion.
+
+ Plugins can provide this hook to override or negate axial expansion.
+ The returned class will be used during initial construction of the core and
+ assemblies, and can implement custom axial expansion routines.
+
+ The first object returned that is not ``None`` will be used.
+ Plugins are encouraged to add the ``tryfirst=True`` argument to their
+ ``HOOKIMPL`` invocations to make sure their specific implementations come
+ earlier in the hook call sequence.
+
+ Returns
+ -------
+ type of :class:`armi.reactor.converters.axialExpansionChanger.AxialExpansionChanger`
+
+ Notes
+ -----
+ This hook **should not** provide an instance of the class. The construction
+ of the changer will be handled by applications and plugins that need it.
+
+ This hook should only be provided by one additional plugin in your application. Otherwise
+ the `order of hook execution `_
+ may not provide the behavior you expect.
+
+ Examples
+ --------
+ >>> class MyPlugin(ArmiPlugin):
+ ... @staticmethod
+ ... @HOOKIMPL(tryfirst=True)
+ ... def getAxialExpansionChanger():
+ ... from myproject.physics import BespokeAxialExpansion
+ ...
+ ... return BespokeAxialExpansion
+
+ """
+
class UserPlugin(ArmiPlugin):
"""
@@ -807,7 +848,7 @@ class PluginError(RuntimeError):
These should always come from some form of programmer error, and indicates
conditions such as:
- - A plugin improperly implementing a hook, when possible to detect.
- - A collision between components provided by plugins (e.g. two plugins providing
- the same Blueprints section)
+ - A plugin improperly implementing a hook, when possible to detect.
+ - A collision between components provided by plugins (e.g. two plugins providing
+ the same Blueprints section)
"""
diff --git a/armi/reactor/__init__.py b/armi/reactor/__init__.py
index a25977afc..cfb43c151 100644
--- a/armi/reactor/__init__.py
+++ b/armi/reactor/__init__.py
@@ -55,11 +55,7 @@
from armi import plugins
-# Provide type checking but avoid circular imports
-# Not used during runtime so we could have a coverage drop here. Add the
-# pragma line to tell coverage.py to skip this
-# https://coverage.readthedocs.io/en/stable/excluding.html
-if TYPE_CHECKING: # pragma: no cover
+if TYPE_CHECKING:
from armi.reactor.reactors import Core
from armi.reactor.assemblyLists import SpentFuelPool
@@ -94,9 +90,9 @@ def defineAssemblyTypes():
@staticmethod
@plugins.HOOKIMPL(trylast=True)
- def defineSystemBuilders() -> Dict[
- str, Callable[[str], Union["Core", "SpentFuelPool"]]
- ]:
+ def defineSystemBuilders() -> (
+ Dict[str, Callable[[str], Union["Core", "SpentFuelPool"]]]
+ ):
from armi.reactor.reactors import Core
from armi.reactor.assemblyLists import SpentFuelPool
@@ -104,3 +100,10 @@ def defineSystemBuilders() -> Dict[
"core": Core,
"sfp": SpentFuelPool,
}
+
+ @staticmethod
+ @plugins.HOOKIMPL(trylast=True)
+ def getAxialExpansionChanger():
+ from armi.reactor.converters.axialExpansionChanger import AxialExpansionChanger
+
+ return AxialExpansionChanger
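A hedged sketch of how a caller can resolve this hook through the plugin manager (construction is left to the caller, per the hook's docstring):

```python
from armi import getPluginManagerOrFail

# getAxialExpansionChanger is declared firstresult=True, so pluggy returns the
# first non-None class; the trylast registration above makes the framework's
# AxialExpansionChanger the fallback when no plugin overrides it.
changerClass = getPluginManagerOrFail().hook.getAxialExpansionChanger()
changer = changerClass()  # construction is left to the caller
```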
diff --git a/armi/reactor/assemblies.py b/armi/reactor/assemblies.py
index 85f4cfea9..84bc09e99 100644
--- a/armi/reactor/assemblies.py
+++ b/armi/reactor/assemblies.py
@@ -22,7 +22,7 @@
import pickle
from random import randint
-import numpy
+import numpy as np
from scipy import interpolate
from armi import runLog
@@ -32,6 +32,7 @@
from armi.reactor import composites
from armi.reactor import grids
from armi.reactor.flags import Flags
+from armi.materials.material import Fluid
from armi.reactor.parameters import ParamLocation
@@ -39,15 +40,6 @@ class Assembly(composites.Composite):
"""
A single assembly in a reactor made up of blocks built from the bottom up.
Append blocks to add them up. Index blocks with 0 being the bottom.
-
- Attributes
- ----------
- pinNum : int
- The number of pins in this assembly.
-
- pinPeakingFactors : list of floats
- The assembly-averaged pin power peaking factors. This is the ratio of pin
- power to AVERAGE pin power in an assembly.
"""
pDefs = assemblyParameters.getAssemblyParameterDefinitions()
@@ -74,14 +66,13 @@ def __init__(self, typ, assemNum=None):
"""
# If no assembly number is provided, generate a random number as a placeholder.
if assemNum is None:
- assemNum = randint(-9e12, -1)
+ assemNum = randint(-9000000000000, -1)
name = self.makeNameFromAssemNum(assemNum)
composites.Composite.__init__(self, name)
self.p.assemNum = assemNum
self.setType(typ)
self._current = 0 # for iterating
self.p.buLimit = self.getMaxParam("buLimit")
- self.pinPeakingFactors = [] # assembly-averaged pin power peaking factors
self.lastLocationLabel = self.LOAD_QUEUE
def __repr__(self):
@@ -166,7 +157,7 @@ def makeUnique(self):
otherwise have been the same object.
"""
# Default to a random negative assembly number (unique enough)
- self.p.assemNum = randint(-9e12, -1)
+ self.p.assemNum = randint(-9000000000000, -1)
self.renumber(self.p.assemNum)
def add(self, obj: blocks.Block):
@@ -191,15 +182,10 @@ def add(self, obj: blocks.Block):
"""
composites.Composite.add(self, obj)
obj.spatialLocator = self.spatialGrid[0, 0, len(self) - 1]
- # assemblies have bounds-based 1-D spatial grids. Adjust it to the right value.
- if len(self.spatialGrid._bounds[2]) < len(self):
- self.spatialGrid._bounds[2][len(self)] = (
- self.spatialGrid._bounds[2][len(self) - 1] + obj.getHeight()
- )
- else:
- # more work is needed, make a new mesh
- self.reestablishBlockOrder()
- self.calculateZCoords()
+
+ # more work is needed, make a new mesh
+ self.reestablishBlockOrder()
+ self.calculateZCoords()
def moveTo(self, locator):
"""Move an assembly somewhere else."""
@@ -322,19 +308,13 @@ def getAveragePlenumTemperature(self):
plenumBlocks = self.getBlocks(Flags.PLENUM)
plenumTemps = [b.p.THcoolantOutletT for b in plenumBlocks]
- if (
- not plenumTemps
- ): # no plenum blocks, use the top block of the assembly for plenum temperature
+ # no plenum blocks, use the top block of the assembly for plenum temperature
+ if not plenumTemps:
runLog.warning("No plenum blocks exist. Using outlet coolant temperature.")
plenumTemps = [self[-1].p.THcoolantOutletT]
return sum(plenumTemps) / len(plenumTemps)
- def rotatePins(self, *args, **kwargs):
- """Rotate an assembly, which means rotating the indexing of pins."""
- for b in self:
- b.rotatePins(*args, **kwargs)
-
def doubleResolution(self):
"""
Turns each block into two half-size blocks.
@@ -362,7 +342,7 @@ def doubleResolution(self):
bx.clearCache()
self.removeAll()
- self.spatialGrid = grids.axialUnitGrid(len(newBlockStack))
+ self.spatialGrid = grids.AxialGrid.fromNCells(len(newBlockStack))
for b in newBlockStack:
self.add(b)
self.reestablishBlockOrder()
@@ -416,7 +396,7 @@ def adjustResolution(self, refA):
)
self.removeAll()
- self.spatialGrid = grids.axialUnitGrid(len(newBlockStack))
+ self.spatialGrid = grids.AxialGrid.fromNCells(len(newBlockStack))
for b in newBlockStack:
self.add(b)
self.reestablishBlockOrder()
@@ -488,7 +468,7 @@ def calculateZCoords(self):
# length of this is numBlocks + 1
bounds = list(self.spatialGrid._bounds)
- bounds[2] = numpy.array(mesh)
+ bounds[2] = np.array(mesh)
self.spatialGrid._bounds = tuple(bounds)
def getTotalHeight(self, typeSpec=None):
@@ -660,7 +640,7 @@ def makeAxialSnapList(self, refAssem=None, refMesh=None, force=False):
for b in self:
top = z + b.getHeight()
try:
- b.p.topIndex = numpy.where(numpy.isclose(refMesh, top))[0].tolist()[0]
+ b.p.topIndex = np.where(np.isclose(refMesh, top))[0].tolist()[0]
except IndexError:
runLog.error(
"Height {0} in this assembly ({1} in {4}) is not in the reactor mesh "
@@ -673,7 +653,7 @@ def makeAxialSnapList(self, refAssem=None, refMesh=None, force=False):
def _shouldMassBeConserved(self, belowFuelColumn, b):
"""
- Determine from a rule set if the mass of a block should be conserved during axial expansion.
+ Determine from a rule set if the mass of a block component should be conserved during axial expansion.
Parameters
----------
@@ -689,8 +669,8 @@ def _shouldMassBeConserved(self, belowFuelColumn, b):
conserveMass : boolean
Should the mass be conserved in this block
- adjustList : list of nuclides
- What nuclides should have their mass conserved (if any)
+ conserveComponents : list of components
+ What components should have their mass conserved (if any)
belowFuelColumn : boolean
Update whether the block is above or below a fuel column
@@ -703,32 +683,31 @@ def _shouldMassBeConserved(self, belowFuelColumn, b):
if b.hasFlags(Flags.FUEL):
# fuel block
conserveMass = True
- adjustList = b.getComponent(Flags.FUEL).getNuclides()
+ conserveComponents = b.getComponents(Flags.FUEL)
elif self.hasFlags(Flags.FUEL):
# non-fuel block of a fuel assembly.
if belowFuelColumn:
# conserve mass of everything below the fuel so as to not invalidate
# grid-plate dose calcs.
conserveMass = True
- adjustList = b.getNuclides()
- # conserve mass of everything except coolant.
- coolant = b.getComponent(Flags.COOLANT)
- coolantList = coolant.getNuclides() if coolant else []
- for nuc in coolantList:
- if nuc in adjustList:
- adjustList.remove(nuc)
+ # conserve mass of everything except fluids.
+ conserveComponents = [
+ comp
+ for comp in b.getComponents()
+ if not isinstance(comp.material, Fluid)
+ ]
else:
# plenum or above block in fuel assembly. don't conserve mass.
conserveMass = False
- adjustList = None
+ conserveComponents = []
else:
# non fuel block in non-fuel assem. Don't conserve mass.
conserveMass = False
- adjustList = None
+ conserveComponents = []
- return conserveMass, adjustList
+ return conserveMass, conserveComponents
- def setBlockMesh(self, blockMesh, conserveMassFlag=False, adjustList=None):
+ def setBlockMesh(self, blockMesh, conserveMassFlag=False):
"""
Snaps the axial mesh points of this assembly to correspond with the reference mesh.
@@ -756,7 +735,14 @@ def setBlockMesh(self, blockMesh, conserveMassFlag=False, adjustList=None):
Parameters
----------
blockMesh : iterable
- a list of floats describing the upper mesh points of each block in cm.
+ A list of floats describing the upper mesh points of each block in cm.
+
+ conserveMassFlag : bool or str
+ Option for how to treat mass conservation when the block mesh changes.
+ Conservation of mass for fuel components is enabled by
+ conserveMassFlag="auto". If not auto, a boolean value should be
+ passed. The default is False, which does not conserve any masses.
+ True conserves mass for all components.
See Also
--------
@@ -796,22 +782,26 @@ def setBlockMesh(self, blockMesh, conserveMassFlag=False, adjustList=None):
return
if conserveMassFlag == "auto":
- conserveMass, adjustList = self._shouldMassBeConserved(
+ conserveMass, conserveComponents = self._shouldMassBeConserved(
belowFuelColumn, b
)
else:
conserveMass = conserveMassFlag
-
- b.setHeight(
- newTop - zBottom, conserveMass=conserveMass, adjustList=adjustList
- )
+ conserveComponents = b.getComponents()
+
+ oldBlockHeight = b.getHeight()
+ b.setHeight(newTop - zBottom)
+ if conserveMass:
+ heightRatio = oldBlockHeight / b.getHeight()
+ for c in conserveComponents:
+ c.changeNDensByFactor(heightRatio)
zBottom = newTop
self.calculateZCoords()
def setBlockHeights(self, blockHeights):
"""Set the block heights of all blocks in the assembly."""
- mesh = numpy.cumsum(blockHeights)
+ mesh = np.cumsum(blockHeights)
self.setBlockMesh(mesh)
def dump(self, fName=None):
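A quick numeric check of why the height-ratio scaling above conserves component mass (names and values are illustrative; only ``changeNDensByFactor`` is from the change itself):

```python
# Component mass is proportional to numberDensity * height (cross-sectional
# area is unchanged by axial expansion), so scaling number density by
# oldHeight / newHeight after setHeight() leaves the mass invariant.
oldHeight, newHeight = 10.0, 12.5  # cm, illustrative
nDens = 0.02  # atoms/(barn-cm), illustrative

massProxyBefore = nDens * oldHeight
newNDens = nDens * (oldHeight / newHeight)  # what changeNDensByFactor applies
massProxyAfter = newNDens * newHeight

assert abs(massProxyBefore - massProxyAfter) < 1e-12
```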
@@ -1032,7 +1022,7 @@ def getBlocksBetweenElevations(self, zLower, zUpper):
return blocksHere
def getParamValuesAtZ(
- self, param, elevations, interpType="linear", fillValue=numpy.NaN
+ self, param, elevations, interpType="linear", fillValue=np.NaN
):
"""
Interpolates a param axially to find it at any value of elevation z.
@@ -1077,7 +1067,7 @@ def getParamValuesAtZ(
Returns
-------
- valAtZ : numpy.ndarray
+ valAtZ : np.ndarray
This will be of the shape (z,data-shape)
"""
interpolator = self.getParamOfZFunction(
@@ -1085,7 +1075,7 @@ def getParamValuesAtZ(
)
return interpolator(elevations)
- def getParamOfZFunction(self, param, interpType="linear", fillValue=numpy.NaN):
+ def getParamOfZFunction(self, param, interpType="linear", fillValue=np.NaN):
"""
Interpolates a param axially to find it at any value of elevation z.
@@ -1124,7 +1114,7 @@ def getParamOfZFunction(self, param, interpType="linear", fillValue=numpy.NaN):
Returns
-------
- valAtZ : numpy.ndarray
+ valAtZ : np.ndarray
This will be of the shape (z,data-shape)
"""
paramDef = self[0].p.paramDefs[param]
@@ -1148,7 +1138,7 @@ def getParamOfZFunction(self, param, interpType="linear", fillValue=numpy.NaN):
z.insert(0, 0.0)
z.pop(-1)
- z = numpy.asarray(z)
+ z = np.asarray(z)
values = self.getChildParamValues(param).transpose()
@@ -1184,7 +1174,7 @@ def reestablishBlockOrder(self):
reordering.
"""
# replace grid with one that has the right number of locations
- self.spatialGrid = grids.axialUnitGrid(len(self))
+ self.spatialGrid = grids.AxialGrid.fromNCells(len(self))
self.spatialGrid.armiObject = self
for zi, b in enumerate(self):
b.spatialLocator = self.spatialGrid[0, 0, zi]
@@ -1253,8 +1243,7 @@ def rotate(self, rad):
This method loops through every ``Block`` in this ``Assembly`` and rotates
it by a given angle (in radians). The rotation angle is positive in the
- counter-clockwise direction, and must be divisible by increments of PI/6
- (60 degrees). To actually perform the ``Block`` rotation, the
+ counter-clockwise direction. To perform the ``Block`` rotation, the
:py:meth:`armi.reactor.blocks.Block.rotate` method is called.
Parameters
@@ -1262,18 +1251,38 @@ def rotate(self, rad):
rad: float
number (in radians) specifying the angle of counter clockwise rotation
- Warning
- -------
- rad must be in 60-degree increments! (i.e., PI/6, PI/3, PI, 2 * PI/3, etc)
"""
- for b in self.getBlocks():
+ for b in self:
b.rotate(rad)
+ def isOnWhichSymmetryLine(self):
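+ """Return which symmetry line, if any, this assembly's location overlaps."""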
+ grid = self.parent.spatialGrid
+ return grid.overlapsWhichSymmetryLine(self.spatialLocator.getCompleteIndices())
+
class HexAssembly(Assembly):
"""Placeholder, so users can explicitly define a hex-based Assembly."""
- pass
+ def rotate(self, rad: float):
+ """Rotate an assembly and its children.
+
+ Parameters
+ ----------
+ rad : float
+ Counter clockwise rotation in radians. **MUST** be in increments of
+ 60 degrees (PI / 3)
+
+ Raises
+ ------
+ ValueError
+ If rotation is not divisible by pi / 3.
+ """
+ if math.isclose(rad % (math.pi / 3), 0, abs_tol=1e-12):
+ return super().rotate(rad)
+ raise ValueError(
+ f"Rotation must be in 60 degree increments, got {math.degrees(rad)} "
+ f"degrees ({rad} radians)"
+ )
class CartesianAssembly(Assembly):
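A short usage sketch of the new rotation guard (assembly setup elided; ``hexAssem`` is an assumed, populated ``HexAssembly``):

```python
import math


def demoRotation(hexAssem):
    # Multiples of 60 degrees (pi / 3) pass the isclose check and rotate blocks.
    hexAssem.rotate(math.radians(120))

    # Any other angle raises ValueError, per HexAssembly.rotate above.
    try:
        hexAssem.rotate(math.radians(45))
    except ValueError as err:
        print(err)
```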
diff --git a/armi/reactor/assemblyLists.py b/armi/reactor/assemblyLists.py
index 97aa7259e..1355ad811 100644
--- a/armi/reactor/assemblyLists.py
+++ b/armi/reactor/assemblyLists.py
@@ -186,7 +186,7 @@ def count(self):
)
lastTime = thisTime
thisTimeCount = 0
- totCount += 1
+ totCount += 1 # noqa: SIM113
thisTimeCount += 1
@@ -201,7 +201,6 @@ def add(self, assem, loc=None):
----------
assem : Assembly
The Assembly to add to the list
-
loc : LocationBase, optional
If provided, the assembly is inserted at that location. If it is not
provided, the locator on the Assembly object will be used. If the
diff --git a/armi/reactor/assemblyParameters.py b/armi/reactor/assemblyParameters.py
index 5e5ff700b..f7f1e0682 100644
--- a/armi/reactor/assemblyParameters.py
+++ b/armi/reactor/assemblyParameters.py
@@ -82,35 +82,34 @@ def getAssemblyParameterDefinitions():
pb.defParam(
"chargeFis",
units=units.KG,
- description="Fissile mass in assembly when it most recently entered the core."
- " If the assembly was discharged and then re-charged, this value will only"
- " reflect the most recent charge.",
+ description="Fissile mass in assembly when it most recently entered the core. If the "
+ "assembly was discharged and then re-charged, this value will only reflect the most "
+ "recent charge.",
)
pb.defParam(
"chargeTime",
units=units.YEARS,
- description="Time at which this assembly most recently entered the core."
- " If the assembly was discharged and then re-charged, this value will only"
- " reflect the most recent charge.",
+ description="Time at which this assembly most recently entered the core. If the "
+ "assembly was discharged and then re-charged, this value will only reflect the most "
+ "recent charge.",
default=parameters.NoDefault,
)
pb.defParam(
"multiplicity",
units=units.UNITLESS,
- description="The number of physical assemblies that the associated object "
- "represents. This is typically 1, but may need to change when the assembly "
- "is moved between containers with different types of symmetry. For "
- "instance, if an assembly moves from a Core with 1/3rd symmetry into a "
- "spent-fuel pool with full symmetry, rather than splitting the assembly "
- "into 3, the multiplicity can be set to 3. For now, this is a bit of a "
- "hack to make fuel handling work; multiplicity in the 1/3 core should "
- "be 3 to begin with, in which case this parameter could be used as the "
- "primary means of handling symmetry and fractional domains throughout "
- "ARMI. We will probably roll that out once the dust settles on some of "
- "this SFP work. For now, the Core stores multiplicity as 1 always, since "
- "the powerMultiplier to adjust to full-core quantities.",
+ description="The number of physical assemblies that the associated object represents. "
+ "This is typically 1, but may need to change when the assembly is moved between "
+ "containers with different types of symmetry. For instance, if an assembly moves from "
+ "a Core with 1/3rd symmetry into a spent-fuel pool with full symmetry, rather than "
+ "splitting the assembly into 3, the multiplicity can be set to 3. For now, this is a "
+ "bit of a hack to make fuel handling work; multiplicity in the 1/3 core should be 3 to "
+ "begin with, in which case this parameter could be used as the primary means of "
+ "handling symmetry and fractional domains throughout ARMI. We will probably roll that "
+ "out once the dust settles on some of this SFP work. For now, the Core stores "
+ "multiplicity as 1 always, since the powerMultiplier to adjust to full-core "
+ "quantities.",
default=1,
)
@@ -149,7 +148,7 @@ def getAssemblyParameterDefinitions():
def _enforceNotesRestrictions(self, value):
"""Enforces that notes can only be of type str with max length of 1000."""
- if type(value) != str:
+ if type(value) is not str:
runLog.error(
"Values stored in the `notes` parameter must be strings of less"
" than 1000 characters!"
@@ -168,10 +167,9 @@ def _enforceNotesRestrictions(self, value):
pb.defParam(
"notes",
units=units.UNITLESS,
- description="A string with notes about the assembly, limited to 1000 characters."
- " This parameter is not meant to store data. Needlessly storing large strings"
- " on this parameter for every assembly is potentially unwise from a memory"
- " perspective.",
+ description="A string with notes about the assembly, limited to 1000 characters. This "
+ "parameter is not meant to store data. Needlessly storing large strings on this "
+ "parameter for every assembly is potentially unwise from a memory perspective.",
saveToDB=True,
default="",
setter=_enforceNotesRestrictions,
@@ -185,8 +183,9 @@ def _enforceNotesRestrictions(self, value):
"crCriticalFraction",
units=units.UNITLESS,
description=(
- "The insertion fraction when the control rod assembly is in its critical configuration. "
- "Note that the default of -1.0 is a trigger for this value not being set yet."
+ "The insertion fraction when the control rod assembly is in its critical "
+ "configuration. Note that the default of -1.0 is a trigger for this value not "
+ "being set yet."
),
saveToDB=True,
default=-1.0,
@@ -204,9 +203,9 @@ def _enforceNotesRestrictions(self, value):
"crInsertedElevation",
units=units.CM,
description=(
- "The elevation of the furthest-most insertion point of a control rod assembly. For a control rod assembly "
- "inserted from the top, this will be the lower tip of the bottom-most moveable section in the assembly when "
- "fully inserted."
+ "The elevation of the furthest-most insertion point of a control rod assembly. For "
+ "a control rod assembly inserted from the top, this will be the lower tip of the "
+ "bottom-most moveable section in the assembly when fully inserted."
),
categories=[parameters.Category.assignInBlueprints],
saveToDB=True,
@@ -223,9 +222,9 @@ def _enforceNotesRestrictions(self, value):
"crWithdrawnElevation",
units=units.CM,
description=(
- "The elevation of the tip of a control rod assembly when it is fully withdrawn. For a control rod assembly "
- "inserted from the top, this will be the lower tip of the bottom-most moveable section in the assembly when "
- "fully withdrawn."
+ "The elevation of the tip of a control rod assembly when it is fully withdrawn. "
+ "For a control rod assembly inserted from the top, this will be the lower tip of "
+ "the bottom-most moveable section in the assembly when fully withdrawn."
),
categories=[parameters.Category.assignInBlueprints],
saveToDB=True,
diff --git a/armi/reactor/blockParameters.py b/armi/reactor/blockParameters.py
index fe1eb895c..4e8e399cf 100644
--- a/armi/reactor/blockParameters.py
+++ b/armi/reactor/blockParameters.py
@@ -18,7 +18,7 @@
from armi import runLog
from armi.physics.neutronics import crossSectionGroupManager
from armi.reactor import parameters
-from armi.reactor.parameters import ParamLocation, Parameter, NoDefault
+from armi.reactor.parameters import NoDefault, Parameter, ParamLocation
from armi.reactor.parameters.parameterDefinitions import isNumpyArray
from armi.utils import units
from armi.utils.units import ASCII_LETTER_A
@@ -33,8 +33,8 @@ def getBlockParameterDefinitions():
"orientation",
units=units.DEGREES,
description=(
- "Triple representing rotations counterclockwise around each spatial axis. For example, "
- "a hex assembly rotated by 1/6th has orientation (0,0,60.0)"
+ "Triple representing rotations counterclockwise around each spatial axis. For "
+ "example, a hex assembly rotated by 1/6th has orientation (0,0,60.0)"
),
default=None,
)
@@ -161,7 +161,10 @@ def getBlockParameterDefinitions():
pb.defParam(
"residence",
units=units.DAYS,
- description="Duration that a block has been in the core multiplied by the fraction of full power generated in that time.",
+ description=(
+ "Duration that a block has been in the core multiplied by the fraction "
+ "of full power generated in that time."
+ ),
categories=["cumulative"],
)
@@ -345,7 +348,10 @@ def buGroupNum(self, buGroupNum):
pb.defParam(
"axialExpTargetComponent",
units=units.UNITLESS,
- description="The name of the target component used for axial expansion and contraction of solid components.",
+ description=(
+ "The name of the target component used for axial expansion and "
+ "contraction of solid components."
+ ),
default="",
saveToDB=True,
)
@@ -446,18 +452,6 @@ def xsTypeNum(self, value):
description="Fuel density coefficient",
)
- pb.defParam(
- "rxFuelDopplerConstant",
- units=f"{units.REACTIVITY}*{units.DEGK}^(n-1)",
- description="Fuel Doppler constant",
- )
-
- pb.defParam(
- "rxFuelVoidedDopplerConstant",
- units=f"{units.REACTIVITY}*{units.DEGK}^(n-1)",
- description="Fuel voided-coolant Doppler constant",
- )
-
pb.defParam(
"rxFuelTemperatureCoeffPerMass",
units=f"{units.REACTIVITY}/{units.KG}",
@@ -477,12 +471,6 @@ def xsTypeNum(self, value):
description="Clad density coefficient",
)
- pb.defParam(
- "rxCladDopplerConstant",
- units=f"{units.REACTIVITY}*{units.DEGK}^(n-1)",
- description="Clad Doppler constant",
- )
-
pb.defParam(
"rxCladTemperatureCoeffPerMass",
units=f"{units.REACTIVITY}/{units.KG}",
@@ -496,12 +484,6 @@ def xsTypeNum(self, value):
description="Structure density coefficient",
)
- pb.defParam(
- "rxStructureDopplerConstant",
- units=f"{units.REACTIVITY}*{units.DEGK}^(n-1)",
- description="Structure Doppler constant",
- )
-
pb.defParam(
"rxStructureTemperatureCoeffPerMass",
units=f"{units.REACTIVITY}/{units.KG}",
@@ -550,6 +532,30 @@ def xsTypeNum(self, value):
description="Fuel voided-coolant Doppler coefficient",
)
+ pb.defParam(
+ "rxFuelDopplerConstant",
+ units=f"{units.REACTIVITY}*{units.DEGK}^(n-1)",
+ description="Fuel Doppler constant",
+ )
+
+ pb.defParam(
+ "rxFuelVoidedDopplerConstant",
+ units=f"{units.REACTIVITY}*{units.DEGK}^(n-1)",
+ description="Fuel voided-coolant Doppler constant",
+ )
+
+ pb.defParam(
+ "rxStructureDopplerConstant",
+ units=f"{units.REACTIVITY}*{units.DEGK}^(n-1)",
+ description="Structure Doppler constant",
+ )
+
+ pb.defParam(
+ "rxCladDopplerConstant",
+ units=f"{units.REACTIVITY}*{units.DEGK}^(n-1)",
+ description="Clad Doppler constant",
+ )
+
pb.defParam(
"rxFuelTemperatureCoeffPerTemp",
units=f"{units.REACTIVITY}/{units.DEGK}",
@@ -625,12 +631,11 @@ def xsTypeNum(self, value):
pb.defParam(
"assemNum",
units=units.UNITLESS,
- description="Index that refers, nominally, to the assemNum parameter of "
- "the containing Assembly object. This is stored on the Block to aid in "
- "visualizing shuffle patterns and the like, and should not be used within "
- "the code. These are not guaranteed to be consistent with the containing "
- "Assembly, so they should not be used as a reliable means to reconstruct "
- "the model.",
+ description="Index that refers, nominally, to the assemNum parameter of the containing "
+ "Assembly object. This is stored on the Block to aid in visualizing shuffle patterns "
+ "and the like, and should not be used within the code. These are not guaranteed to be "
+ "consistent with the containing Assembly, so they should not be used as a reliable "
+ "means to reconstruct the model.",
categories=[parameters.Category.retainOnReplacement],
)
@@ -780,6 +785,7 @@ def xsTypeNum(self, value):
units=units.PERCENT_FIMA,
description="Peak percentage of the initial heavy metal atoms that have been fissioned",
location=ParamLocation.MAX,
+ categories=["cumulative", "eq cumulative shift"],
)
pb.defParam(
@@ -799,7 +805,10 @@ def xsTypeNum(self, value):
pb.defParam(
"smearDensity",
units=units.UNITLESS,
- description="Smear density of fuel pins in this block. Defined as the ratio of fuel area to total space inside cladding.",
+ description=(
+ "Smear density of fuel pins in this block. Defined as the ratio of fuel "
+ "area to total space inside cladding."
+ ),
location=ParamLocation.AVERAGE,
)
diff --git a/armi/reactor/blocks.py b/armi/reactor/blocks.py
index 46524a54c..cc18826a0 100644
--- a/armi/reactor/blocks.py
+++ b/armi/reactor/blocks.py
@@ -13,23 +13,23 @@
# limitations under the License.
"""
-Defines blocks, which are axial chunks of assemblies. They contain
-most of the state variables, including power, flux, and homogenized number densities.
+Defines blocks, which are axial chunks of assemblies. They contain most of the state variables,
+including power, flux, and homogenized number densities.
-Assemblies are made of blocks.
-
-Blocks are made of components.
+Assemblies are made of blocks. Blocks are made of components.
"""
from typing import Optional, Type, Tuple, ClassVar
import collections
import copy
import math
-import numpy
+import numpy as np
from armi import nuclideBases
from armi import runLog
from armi.bookkeeping import report
+from armi.nucDirectory import elements
+from armi.nuclearDataIO import xsCollections
from armi.physics.neutronics import GAMMA
from armi.physics.neutronics import NEUTRON
from armi.reactor import blockParameters
@@ -45,6 +45,7 @@
from armi.reactor.parameters import ParamLocation
from armi.utils import densityTools
from armi.utils import hexagon
+from armi.utils import iterables
from armi.utils import units
from armi.utils.plotting import plotBlockFlux
from armi.utils.units import TRACE_NUMBER_DENSITY
@@ -87,14 +88,13 @@ def __init__(self, name: str, height: float = 1.0):
The name of this block
height : float, optional
- The height of the block in cm. Defaults to 1.0 so that
- `getVolume` assumes unit height.
+ The height of the block in cm. Defaults to 1.0 so that ``getVolume`` assumes unit height.
"""
composites.Composite.__init__(self, name)
self.p.height = height
self.p.heightBOL = height
- self.p.orientation = numpy.array((0.0, 0.0, 0.0))
+ self.p.orientation = np.array((0.0, 0.0, 0.0))
self.points = []
self.macros = None
@@ -163,12 +163,11 @@ def createHomogenizedCopy(self, pinSpatialLocators=False):
Notes
-----
- Used to implement a copy function for specific block types that can
- be much faster than a deepcopy by glossing over details that may be
- unnecessary in certain contexts.
+ Used to implement a copy function for specific block types that can be much faster than a
+ deepcopy by glossing over details that may be unnecessary in certain contexts.
- This base class implementation is just a deepcopy of the block, in full detail
- (not homogenized).
+ This base class implementation is just a deepcopy of the block, in full detail (not
+ homogenized).
"""
return copy.deepcopy(self)
@@ -179,52 +178,15 @@ def core(self):
c = self.getAncestor(lambda c: isinstance(c, Core))
return c
- @property
- def r(self):
- """
- Look through the ancestors of the Block to find a Reactor, and return it.
-
- Notes
- -----
- Typical hierarchy: Reactor <- Core <- Assembly <- Block
- A block should only have a reactor through a parent assembly.
- It may make sense to try to factor out usage of ``b.r``.
-
- Returns
- -------
- core.parent : armi.reactor.reactors.Reactor
- ARMI reactor object that is an ancestor of the block.
-
- Raises
- ------
- ValueError
- If the parent of the block's ``core`` is not an ``armi.reactor.reactors.Reactor``.
- """
- from armi.reactor.reactors import Reactor
-
- core = self.core
- if core is None:
- return self.getAncestor(lambda o: isinstance(o, Reactor))
-
- if not isinstance(core.parent, Reactor):
- raise TypeError(
- "Parent of Block ({}) core is not a Reactor. Got {} instead".format(
- core.parent, type(core.parent)
- )
- )
-
- return core.parent
-
def makeName(self, assemNum, axialIndex):
"""
Generate a standard block from assembly number.
This also sets the block-level assembly-num param.
- Once, we used a axial-character suffix to represent the axial
- index, but this is inherently limited so we switched to a numerical
- name. The axial suffix needs can be brought in in plugins that require
- them.
+ Once, we used an axial-character suffix to represent the axial index, but this is
+ inherently limited so we switched to a numerical name. Axial suffixes can be brought in
+ by plugins that require them.
Examples
--------
@@ -238,10 +200,10 @@ def getSmearDensity(self, cold=True):
"""
Compute the smear density of pins in this block.
- Smear density is the area of the fuel divided by the area of the space available
- for fuel inside the cladding. Other space filled with solid materials is not
- considered available. If all the area is fuel, it has 100% smear density. Lower
- smear density allows more room for swelling.
+ Smear density is the area of the fuel divided by the area of the space available for fuel
+ inside the cladding. Other space filled with solid materials is not considered available. If
+ all the area is fuel, it has 100% smear density. Lower smear density allows more room for
+ swelling.
.. warning:: This requires circular fuel and circular cladding. Designs that vary
from this will be wrong. It may make sense in the future to put this somewhere a
@@ -249,13 +211,12 @@ def getSmearDensity(self, cold=True):
Notes
-----
- This only considers circular objects. If you have a cladding that is not a circle,
- it will be ignored.
+ This only considers circular objects. If you have a cladding that is not a circle, it will
+ be ignored.
- Negative areas can exist for void gaps in the fuel pin. A negative area in a gap
- represents overlap area between two solid components. To account for this
- additional space within the pin cladding the abs(negativeArea) is added to the
- inner cladding area.
+ Negative areas can exist for void gaps in the fuel pin. A negative area in a gap represents
+ overlap area between two solid components. To account for this additional space within the
+ pin cladding, the abs(negativeArea) is added to the inner cladding area.
Parameters
----------
@@ -287,7 +248,7 @@ def getSmearDensity(self, cold=True):
)
# Compute component areas
- cladID = numpy.mean([clad.getDimension("id", cold=cold) for clad in clads])
+ cladID = np.mean([clad.getDimension("id", cold=cold) for clad in clads])
innerCladdingArea = (
math.pi * (cladID**2) / 4.0 * self.getNumComponents(Flags.FUEL)
)
@@ -299,7 +260,8 @@ def getSmearDensity(self, cold=True):
if c.isFuel():
fuelComponentArea += componentArea
elif c.hasFlags(Flags.SLUG):
- # this flag designates that this clad/slug combination isn't fuel and shouldn't be counted in the average
+ # this flag designates that this clad/slug combination isn't fuel and shouldn't be
+ # counted in the average
pass
else:
if c.containsSolidMaterial():
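As a quick numeric check of the definition above (dimensions are assumed, not from ARMI): a single pin with a 0.6 cm fuel outer diameter inside cladding with a 0.7 cm inner diameter gives:

    import math

    fuelOD, cladID = 0.6, 0.7  # cm, hypothetical pin dimensions
    fuelArea = math.pi * fuelOD**2 / 4.0
    innerCladdingArea = math.pi * cladID**2 / 4.0
    print(fuelArea / innerCladdingArea)  # ~0.735 smear density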
@@ -337,9 +299,8 @@ def autoCreateSpatialGrids(self):
Raises
------
ValueError
- If the multiplicities of the block are not only 1 or N or if generated ringNumber leads to more positions than necessary.
-
-
+ If the block contains multiplicities other than 1 or N, or if the generated ringNumber
+ leads to more positions than necessary.
"""
raise NotImplementedError()
@@ -363,7 +324,7 @@ def getMgFlux(self, adjoint=False, average=False, volume=None, gamma=False):
volume: float, optional
If average=True, the volume-integrated flux is divided by volume before being returned.
- The user may specify a volume here, or the function will obtain the block volume directly.
+ The user may specify a volume, or the function will obtain the block volume directly.
gamma : bool, optional
Whether to return the neutron flux or the gamma flux.
@@ -375,7 +336,7 @@ def getMgFlux(self, adjoint=False, average=False, volume=None, gamma=False):
flux = composites.ArmiObject.getMgFlux(
self, adjoint=adjoint, average=False, volume=volume, gamma=gamma
)
- if average and numpy.any(self.p.lastMgFlux):
+ if average and np.any(self.p.lastMgFlux):
volume = volume or self.getVolume()
lastFlux = self.p.lastMgFlux / volume
flux = (flux + lastFlux) / 2.0
@@ -391,8 +352,8 @@ def setPinMgFluxes(self, fluxes, adjoint=False, gamma=False):
Parameters
----------
fluxes : 2-D list of floats
- The block-level pin multigroup fluxes. fluxes[g][i] represents the flux in group g for pin i.
- Flux units are the standard n/cm^2/s.
+ The block-level pin multigroup fluxes. fluxes[g][i] represents the flux in group g for
+ pin i. Flux units are the standard n/cm^2/s.
The "ARMI pin ordering" is used, which is counter-clockwise from 3 o'clock.
adjoint : bool, optional
Whether to set real or adjoint data.
@@ -402,8 +363,8 @@ def setPinMgFluxes(self, fluxes, adjoint=False, gamma=False):
Outputs
-------
self.p.pinMgFluxes : 2-D array of floats
- The block-level pin multigroup fluxes. pinMgFluxes[g][i] represents the flux in group g for pin i.
- Flux units are the standard n/cm^2/s.
+ The block-level pin multigroup fluxes. pinMgFluxes[g][i] represents the flux in group g
+ for pin i. Flux units are the standard n/cm^2/s.
The "ARMI pin ordering" is used, which is counter-clockwise from 3 o'clock.
"""
pinFluxes = []
@@ -422,7 +383,7 @@ def setPinMgFluxes(self, fluxes, adjoint=False, gamma=False):
thisPinFlux.append(fluxes[g][pinLoc - 1])
pinFluxes.append(thisPinFlux)
- pinFluxes = numpy.array(pinFluxes)
+ pinFluxes = np.array(pinFluxes)
if gamma:
if adjoint:
raise ValueError("Adjoint gamma flux is currently unsupported.")
@@ -634,17 +595,6 @@ def setBuLimitInfo(self):
def getMaxArea(self):
raise NotImplementedError
- def getMaxVolume(self):
- """
- The maximum volume of this object if it were totally full.
-
- Returns
- -------
- vol : float
- volume in cm^3.
- """
- return self.getMaxArea() * self.getHeight()
-
def getArea(self, cold=False):
"""
Return the area of a block for a full core or a 1/3 core model.
@@ -733,11 +683,6 @@ def getSymmetryFactor(self):
"""
return 1.0
- def isOnWhichSymmetryLine(self):
- """Block symmetry lines are determined by the reactor, not the parent."""
- grid = self.core.spatialGrid
- return grid.overlapsWhichSymmetryLine(self.spatialLocator.getCompleteIndices())
-
def adjustDensity(self, frac, adjustList, returnMass=False):
"""
adjusts the total density of each nuclide in adjustList by frac.
@@ -769,7 +714,6 @@ def adjustDensity(self, frac, adjustList, returnMass=False):
numDensities = self.getNuclideNumberDensities(adjustList)
for nuclideName, dens in zip(adjustList, numDensities):
-
if not dens:
# don't modify zeros.
continue
@@ -794,7 +738,7 @@ def _updateDetailedNdens(self, frac, adjustList):
# BOL assems get expanded to a reference so the first check is needed so it
# won't call .blueprints on None since BOL assems don't have a core/r
return
- if any(nuc in self.r.blueprints.activeNuclides for nuc in adjustList):
+ if any(nuc in self.core.r.blueprints.activeNuclides for nuc in adjustList):
self.p.detailedNDens *= frac
# Other power densities do not need to be updated as they are calculated in
# the global flux interface, which occurs after axial expansion from crucible
@@ -1122,9 +1066,11 @@ def getNumPins(self):
nPins = [
sum(
[
- int(c.getDimension("mult"))
- if isinstance(c, basicShapes.Circle)
- else 0
+ (
+ int(c.getDimension("mult"))
+ if isinstance(c, basicShapes.Circle)
+ else 0
+ )
for c in self.iterComponents(compType)
]
)
@@ -1263,9 +1209,7 @@ def isPlenumPin(self, c):
and c.hasFlags(Flags.GAP)
and c.getDimension("id") == 0
)
- if self.hasFlags([Flags.PLENUM, Flags.ACLP]) and cIsCenterGapGap:
- return True
- return False
+ return self.hasFlags([Flags.PLENUM, Flags.ACLP]) and cIsCenterGapGap
def getPitch(self, returnComp=False):
"""
@@ -1290,6 +1234,7 @@ def getPitch(self, returnComp=False):
----------
returnComp : bool, optional
If true, will return the component that has the maximum pitch as well
+
Returns
-------
pitch : float or None
@@ -1417,9 +1362,9 @@ def getMfp(self, gamma=False):
lib = self.core.lib
flux = self.getMgFlux(gamma=gamma)
flux = [fi / max(flux) for fi in flux]
- mfpNumerator = numpy.zeros(len(flux))
- absMfpNumerator = numpy.zeros(len(flux))
- transportNumerator = numpy.zeros(len(flux))
+ mfpNumerator = np.zeros(len(flux))
+ absMfpNumerator = np.zeros(len(flux))
+ transportNumerator = np.zeros(len(flux))
numDensities = self.getNumberDensities()
@@ -1562,7 +1507,7 @@ def getIntegratedMgFlux(self, adjoint=False, gamma=False):
Returns
-------
- integratedFlux : numpy.array
+ integratedFlux : np.ndarray
multigroup neutron tracklength in [n-cm/s]
"""
if adjoint:
@@ -1574,7 +1519,7 @@ def getIntegratedMgFlux(self, adjoint=False, gamma=False):
else:
integratedFlux = self.p.mgFlux
- return numpy.array(integratedFlux)
+ return np.array(integratedFlux)
def getLumpedFissionProductCollection(self):
"""
@@ -1608,22 +1553,19 @@ def setAxialExpTargetComp(self, targetComponent):
:id: I_ARMI_MANUAL_TARG_COMP
:implements: R_ARMI_MANUAL_TARG_COMP
- Sets the ``axialExpTargetComponent`` parameter on the block to the name
- of the Component which is passed in. This is then used by the
+ Sets the ``axialExpTargetComponent`` parameter on the block to the name of the Component
+ which is passed in. This is then used by the
:py:class:`~armi.reactor.converters.axialExpansionChanger.AxialExpansionChanger`
class during axial expansion.
- This method is typically called from within :py:meth:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint.construct`
- during the process of building a Block from the blueprints.
+ This method is typically called from within
+ :py:meth:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint.construct` during the
+ process of building a Block from the blueprints.
Parameter
---------
targetComponent: :py:class:`Component <armi.reactor.components.component.Component>` object
- component specified to be target component for axial expansion changer
-
- See Also
- --------
- armi.reactor.converters.axialExpansionChanger.py::ExpansionData::_setTargetComponents
+ Component specified to be target component for axial expansion changer
"""
self.p.axialExpTargetComponent = targetComponent.name
@@ -1653,6 +1595,153 @@ def getPinCoordinates(self):
coords.append(clad.spatialLocator.getLocalCoordinates())
return coords
+ def getTotalEnergyGenerationConstants(self):
+ """
+ Get the total energy generation group constants for a block.
+
+ Gives the total energy generation rates when multiplied by the multigroup flux.
+
+ Returns
+ -------
+ totalEnergyGenConstant: np.ndarray
+ Total (fission + capture) energy generation group constants (Joules/cm)
+ """
+ return (
+ self.getFissionEnergyGenerationConstants()
+ + self.getCaptureEnergyGenerationConstants()
+ )
+
+ def getFissionEnergyGenerationConstants(self):
+ """
+ Get the fission energy generation group constants for a block.
+
+ Gives the fission energy generation rates when multiplied by the multigroup
+ flux.
+
+ Returns
+ -------
+ fissionEnergyGenConstant: np.ndarray
+ Energy generation group constants (Joules/cm)
+
+ Raises
+ ------
+ RuntimeError:
+ Reports if a cross section library is not assigned to a reactor.
+ """
+ if not self.core.lib:
+ raise RuntimeError(
+ "Cannot compute energy generation group constants without a library"
+ ". Please ensure a library exists."
+ )
+
+ return xsCollections.computeFissionEnergyGenerationConstants(
+ self.getNumberDensities(), self.core.lib, self.getMicroSuffix()
+ )
+
+ def getCaptureEnergyGenerationConstants(self):
+ """
+ Get the capture energy generation group constants for a block.
+
+ Gives the capture energy generation rates when multiplied by the multigroup
+ flux.
+
+ Returns
+ -------
+ captureEnergyGenConstant: np.ndarray
+ Energy generation group constants (Joules/cm)
+
+ Raises
+ ------
+ RuntimeError:
+ Reports if a cross section library is not assigned to a reactor.
+ """
+ if not self.core.lib:
+ raise RuntimeError(
+ "Cannot compute energy generation group constants without a library"
+ ". Please ensure a library exists."
+ )
+
+ return xsCollections.computeCaptureEnergyGenerationConstants(
+ self.getNumberDensities(), self.core.lib, self.getMicroSuffix()
+ )
+
+ def getNeutronEnergyDepositionConstants(self):
+ """
+ Get the neutron energy deposition group constants for a block.
+
+ Returns
+ -------
+ energyDepConstants: np.ndarray
+ Neutron energy generation group constants (in Joules/cm)
+
+ Raises
+ ------
+ RuntimeError:
+ Reports if a cross section library is not assigned to a reactor.
+ """
+ if not self.core.lib:
+ raise RuntimeError(
+ "Cannot get neutron energy deposition group constants without "
+ "a library. Please ensure a library exists."
+ )
+
+ return xsCollections.computeNeutronEnergyDepositionConstants(
+ self.getNumberDensities(), self.core.lib, self.getMicroSuffix()
+ )
+
+ def getGammaEnergyDepositionConstants(self):
+ """
+ Get the gamma energy deposition group constants for a block.
+
+ Returns
+ -------
+ energyDepConstants: np.ndarray
+ Energy generation group constants (in Joules/cm)
+
+ Raises
+ ------
+ RuntimeError:
+ Reports if a cross section library is not assigned to a reactor.
+ """
+ if not self.core.lib:
+ raise RuntimeError(
+ "Cannot get gamma energy deposition group constants without "
+ "a library. Please ensure a library exists."
+ )
+
+ return xsCollections.computeGammaEnergyDepositionConstants(
+ self.getNumberDensities(), self.core.lib, self.getMicroSuffix()
+ )
+
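A hedged sketch of how these group constants combine with the multigroup flux; it assumes a loaded cross-section library and an existing block ``b``:

    import numpy as np

    def blockPowerWatts(b):
        constants = b.getTotalEnergyGenerationConstants()  # J/cm, per group
        trackLength = b.getIntegratedMgFlux()              # n-cm/s, per group
        return float(np.dot(constants, trackLength))       # J/s generated in the block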
+ def getBoronMassEnrich(self):
+ """Return B-10 mass fraction."""
+ b10 = self.getMass("B10")
+ b11 = self.getMass("B11")
+ total = b11 + b10
+ if total == 0.0:
+ return 0.0
+ return b10 / total
+
+ def getPuMoles(self):
+ """Returns total number of moles of Pu isotopes."""
+ nucNames = [nuc.name for nuc in elements.byZ[94].nuclides]
+ puN = sum(self.getNuclideNumberDensities(nucNames))
+
+ return (
+ puN
+ / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM
+ * self.getVolume()
+ * self.getSymmetryFactor()
+ )
+
+ def getUraniumMassEnrich(self):
+ """Returns U-235 mass fraction assuming U-235 and U-238 only."""
+ u5 = self.getMass("U235")
+ if u5 < 1e-10:
+ return 0.0
+ u8 = self.getMass("U238")
+ return u5 / (u8 + u5)
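And a small usage sketch for the isotopic helpers on an existing block ``b`` (results depend on the loadings):

    def summarizeLoadings(b):
        return {
            "puMoles": b.getPuMoles(),            # mol of Pu, symmetry-corrected
            "uEnrich": b.getUraniumMassEnrich(),  # U-235/(U-235 + U-238) by mass
            "b10Enrich": b.getBoronMassEnrich(),  # B-10/(B-10 + B-11) by mass
        }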
+
class HexBlock(Block):
"""
@@ -1662,10 +1751,9 @@ class HexBlock(Block):
:id: I_ARMI_BLOCK_HEX
:implements: R_ARMI_BLOCK_HEX
- This class defines hexagonal-shaped Blocks. It inherits functionality from the parent
- class, Block, and defines hexagonal-specific methods including, but not limited to,
- querying pin pitch, pin linear power densities, hydraulic diameter, and retrieving
- inner and outer pitch.
+ This class defines hexagonal-shaped Blocks. It inherits functionality from the parent class,
+ Block, and defines hexagonal-specific methods including, but not limited to, querying pin
+ pitch, pin linear power densities, hydraulic diameter, and retrieving inner and outer pitch.
"""
PITCH_COMPONENT_TYPE: ClassVar[_PitchDefiningComponent] = (components.Hexagon,)
@@ -1730,13 +1818,13 @@ def createHomogenizedCopy(self, pinSpatialLocators=False):
by omitting this detailed data and only providing the necessary level of detail for
the uniform mesh reactor: number densities on each block.
- .. note: Individual components within a block can have different temperatures, and this
+ Individual components within a block can have different temperatures, and this
can affect cross sections. This temperature variation is captured by the lattice physics
module. As long as temperature distribution is correctly captured during cross section
generation, it doesn't need to be transferred to the neutronics solver directly through
this copy operation.
- .. note: If you make a new block, you must add it to an assembly and a reactor.
+ If you make a new block, you must add it to an assembly and a reactor.
Returns
-------
@@ -1888,7 +1976,7 @@ def setPinPowers(self, powers, powerKeySuffix=""):
)
powerKey = f"linPowByPin{powerKeySuffix}"
- self.p[powerKey] = numpy.zeros(numPins)
+ self.p[powerKey] = np.zeros(numPins)
# Loop through rings. The *pinLocation* parameter is only accessed for fueled
# blocks; it is assumed that non-fueled blocks do not use a rotation map.
@@ -1925,8 +2013,7 @@ def rotate(self, rad):
Python list of length 6 in order to be eligible for rotation; all parameters that
do not meet these two criteria are not rotated.
- The pin indexing, as stored on the pinLocation parameter, is also updated via
- :py:meth:`rotatePins `.
+ The pin indexing, as stored on the ``pinLocation`` parameter, is also updated.
Parameters
----------
@@ -1935,54 +2022,53 @@ def rotate(self, rad):
in 60-degree increments (i.e., PI/6, PI/3, PI, 2 * PI/3, 5 * PI/6,
and 2 * PI)
- See Also
- --------
- :py:meth:`rotatePins `
"""
rotNum = round((rad % (2 * math.pi)) / math.radians(60))
- self.rotatePins(rotNum)
- params = self.p.paramDefs.atLocation(ParamLocation.CORNERS).names
- params += self.p.paramDefs.atLocation(ParamLocation.EDGES).names
- for param in params:
- if isinstance(self.p[param], list):
- if len(self.p[param]) == 6:
- self.p[param] = self.p[param][-rotNum:] + self.p[param][:-rotNum]
- elif self.p[param] == []:
- # List hasn't been defined yet, no warning needed.
- pass
- else:
- msg = (
- "No rotation method defined for spatial parameters that aren't "
- "defined once per hex edge/corner. No rotation performed "
- f"on {param}"
- )
- runLog.warning(msg)
- elif isinstance(self.p[param], numpy.ndarray):
- if len(self.p[param]) == 6:
- self.p[param] = numpy.concatenate(
- (self.p[param][-rotNum:], self.p[param][:-rotNum])
- )
- elif len(self.p[param]) == 0:
+ self._rotatePins(rotNum)
+ self._rotateBoundaryParameters(rotNum)
+ self._rotateDisplacement(rad)
+
+ def _rotateBoundaryParameters(self, rotNum: int):
+ """Rotate any parameters defined on the corners or edge of bounding hexagon.
+
+ Parameters
+ ----------
+ rotNum : int
+ Rotation number between zero and five, inclusive, specifying how many
+ rotations have taken place.
+ """
+ names = self.p.paramDefs.atLocation(ParamLocation.CORNERS).names
+ names += self.p.paramDefs.atLocation(ParamLocation.EDGES).names
+ for name in names:
+ original = self.p[name]
+ if isinstance(original, (list, np.ndarray)):
+ if len(original) == 6:
+ # Rotate by making the -rotNum item be first
+ self.p[name] = iterables.pivot(original, -rotNum)
+ elif len(original) == 0:
# Hasn't been defined yet, no warning needed.
pass
else:
msg = (
"No rotation method defined for spatial parameters that aren't "
"defined once per hex edge/corner. No rotation performed "
- f"on {param}"
+ f"on {name}"
)
runLog.warning(msg)
- elif isinstance(self.p[param], (int, float)):
+ elif isinstance(original, (int, float)):
# this is a scalar and there shouldn't be any rotation.
pass
- elif self.p[param] is None:
+ elif original is None:
# param is not set yet, so no rotation is needed either.
pass
else:
raise TypeError(
- f"b.rotate() method received unexpected data type for {param} on block {self}\n"
- + f"expected list, np.ndarray, int, or float. received {self.p[param]}"
+ f"b.rotate() method received unexpected data type for {name} on block {self}\n"
+ + f"expected list, np.ndarray, int, or float. received {original}"
)
+
+ def _rotateDisplacement(self, rad: float):
# This specifically uses the .get() functionality to avoid an error if this
# parameter does not exist.
dispx = self.p.get("displacementX")
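For reference, the ``iterables.pivot(original, -rotNum)`` call in ``_rotateBoundaryParameters`` is equivalent to the list slicing it replaced; a sketch assuming a plain six-element list:

    corners = ["NE", "N", "NW", "SW", "S", "SE"]  # hypothetical per-corner data
    rotNum = 1
    rotated = corners[-rotNum:] + corners[:-rotNum]
    # rotated == ["SE", "NE", "N", "NW", "SW", "S"]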
@@ -1991,7 +2077,7 @@ def rotate(self, rad):
self.p.displacementX = dispx * math.cos(rad) - dispy * math.sin(rad)
self.p.displacementY = dispx * math.sin(rad) + dispy * math.cos(rad)
- def rotatePins(self, rotNum, justCompute=False):
+ def _rotatePins(self, rotNum, justCompute=False):
"""
Rotate the pins of a block, which means rotating the indexing of pins. Note that this does
not rotate all block quantities, just the pins.
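The displacement update in ``_rotateDisplacement`` above is the standard two-dimensional rotation; a quick check for a 60-degree rotation (values made up):

    import math

    rad = math.radians(60)
    dispx, dispy = 1.0, 0.0
    newX = dispx * math.cos(rad) - dispy * math.sin(rad)  # 0.5
    newY = dispx * math.sin(rad) + dispy * math.cos(rad)  # ~0.866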
@@ -2050,10 +2136,12 @@ def rotatePins(self, rotNum, justCompute=False):
"Cannot rotate {0} to rotNum {1}. Must be 0-5. ".format(self, rotNum)
)
- # Pin numbers start at 1. Number of pins in the block is assumed to be based on
- # cladding count.
- numPins = self.getNumComponents(Flags.CLAD)
- rotateIndexLookup = dict(zip(range(1, numPins + 1), range(1, numPins + 1)))
+ numPins = self.getNumPins()
+ hexRings = hexagon.numRingsToHoldNumCells(numPins)
+ fullNumPins = hexagon.totalPositionsUpToRing(hexRings)
+ rotateIndexLookup = dict(
+ zip(range(1, fullNumPins + 1), range(1, fullNumPins + 1))
+ )
# Look up the current orientation and add this to it. The math below just rotates
# from the reference point so we need a total rotation.
@@ -2061,22 +2149,12 @@ def rotatePins(self, rotNum, justCompute=False):
# non-trivial rotation requested
# start at 2 because pin 1 never changes (it's in the center!)
- for pinNum in range(2, numPins + 1):
+ for pinNum in range(2, fullNumPins + 1):
if rotNum == 0:
# Rotation to reference orientation. Pin locations are pin IDs.
pass
else:
- # Determine the pin ring. Rotation does not change the pin ring!
- ring = int(
- math.ceil((3.0 + math.sqrt(9.0 - 12.0 * (1.0 - pinNum))) / 6.0)
- )
-
- # Rotate the pin position (within the ring, which does not change)
- tot_pins = 1 + 3 * ring * (ring - 1)
- newPinLocation = pinNum + (ring - 1) * rotNum
- if newPinLocation > tot_pins:
- newPinLocation -= (ring - 1) * 6
-
+ newPinLocation = hexagon.getIndexOfRotatedCell(pinNum, rotNum)
# Assign "before" and "after" pin indices to the index lookup
rotateIndexLookup[pinNum] = newPinLocation
@@ -2086,7 +2164,7 @@ def rotatePins(self, rotNum, justCompute=False):
if not justCompute:
self.setRotationNum(rotNum)
self.p["pinLocation"] = [
- rotateIndexLookup[pinNum] for pinNum in range(1, numPins + 1)
+ rotateIndexLookup[pinNum] for pinNum in range(1, fullNumPins + 1)
]
return rotateIndexLookup
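``hexagon.getIndexOfRotatedCell`` now encapsulates the ring arithmetic that was inlined here before this change; a sketch of that arithmetic, assuming one-based spiral pin indexing (the helper's actual implementation may differ):

    import math

    def rotatedCellIndex(pinNum: int, rotNum: int) -> int:
        # Ring that holds pinNum; ring 1 is the single center cell, and
        # rotation never moves a pin out of its ring.
        ring = int(math.ceil((3.0 + math.sqrt(9.0 - 12.0 * (1.0 - pinNum))) / 6.0))
        totPins = 1 + 3 * ring * (ring - 1)    # cells up to and including this ring
        newLoc = pinNum + (ring - 1) * rotNum  # each 60-degree step advances ring-1 cells
        if newLoc > totPins:
            newLoc -= (ring - 1) * 6           # wrap around within the ring
        return newLoc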
@@ -2198,7 +2276,7 @@ def getPinToDuctGap(self, cold=False):
def getRotationNum(self):
"""Get index 0 through 5 indicating number of rotations counterclockwise around the z-axis."""
return (
- numpy.rint(self.p.orientation[2] / 360.0 * 6) % 6
+ np.rint(self.p.orientation[2] / 360.0 * 6) % 6
) # assume rotation only in Z
def setRotationNum(self, rotNum):
@@ -2225,7 +2303,7 @@ def getSymmetryFactor(self):
"""
try:
symmetry = self.parent.spatialLocator.grid.symmetry
- except: # noqa: bare-except
+ except Exception:
return 1.0
if (
symmetry.domain == geometry.DomainType.THIRD_CORE
@@ -2251,25 +2329,25 @@ def getSymmetryFactor(self):
def autoCreateSpatialGrids(self):
"""
- Given a block without a spatialGrid, create a spatialGrid and give its children
- the corresponding spatialLocators (if it is a simple block).
+ Given a block without a spatialGrid, create a spatialGrid and give its children the
+ corresponding spatialLocators (if it is a simple block).
- In this case, a simple block would be one that has either multiplicity of
- components equal to 1 or N but no other multiplicities. Also, this should only
- happen when N fits exactly into a given number of hex rings. Otherwise, do not
- create a grid for this block.
+ In this case, a simple block would be one that has either multiplicity of components equal
+ to 1 or N but no other multiplicities. Also, this should only happen when N fits exactly
+ into a given number of hex rings. Otherwise, do not create a grid for this block.
Notes
-----
- If the block meets all the conditions, we gather all components to either be a multiIndexLocation containing all
- of the pin positions, otherwise, locator is the center (0,0).
+ If the Block meets all the conditions, each component gets either a multiIndexLocation
+ containing all of the pin positions or a single locator at the center (0,0).
Also, this only works on blocks that have 'flat side up'.
Raises
------
ValueError
- If the multiplicities of the block are not only 1 or N or if generated ringNumber leads to more positions than necessary.
+ If the block contains multiplicities other than 1 or N, or if the generated ringNumber
+ leads to more positions than necessary.
"""
# Check multiplicities...
mults = {c.getDimension("mult") for c in self.iterComponents()}
@@ -2331,8 +2409,8 @@ def getPinPitch(self, cold=False):
"""
Get the pin pitch in cm.
- Assumes that the pin pitch is defined entirely by contacting cladding tubes
- and wire wraps. Grid spacers not yet supported.
+ Assumes that the pin pitch is defined entirely by contacting cladding tubes and wire wraps.
+ Grid spacers not yet supported.
.. impl:: Pin pitch within block is retrievable.
:id: I_ARMI_BLOCK_DIMS6
@@ -2458,7 +2536,7 @@ def getWettedPerimeter(self):
correctionFactor = 1.0
if isinstance(c, Helix):
# account for the helical wire wrap
- correctionFactor = numpy.hypot(
+ correctionFactor = np.hypot(
1.0,
math.pi
* c.getDimension("helixDiameter")
@@ -2519,7 +2597,6 @@ def getHydraulicDiameter(self):
class CartesianBlock(Block):
-
PITCH_DIMENSION = "widthOuter"
PITCH_COMPONENT_TYPE = components.Rectangle
diff --git a/armi/reactor/blueprints/__init__.py b/armi/reactor/blueprints/__init__.py
index e600c4f75..a17ddbdfd 100644
--- a/armi/reactor/blueprints/__init__.py
+++ b/armi/reactor/blueprints/__init__.py
@@ -71,7 +71,6 @@
from ruamel.yaml import CLoader, RoundTripLoader
import ordered_set
-import tabulate
import yamlize
import yamlize.objects
@@ -102,6 +101,7 @@
CONF_ACCEPTABLE_BLOCK_AREA_ERROR,
CONF_GEOM_FILE,
)
+from armi.utils import tabulate
from armi.utils import textProcessors
from armi.utils.customExceptions import InputError
@@ -151,7 +151,7 @@ def __new__(mcs, name, bases, attrs):
else:
pluginSections = pm.hook.defineBlueprintsSections()
for plug in pluginSections:
- for (attrName, section, resolver) in plug:
+ for attrName, section, resolver in plug:
assert isinstance(section, yamlize.Attribute)
if attrName in attrs:
raise plugins.PluginError(
@@ -326,10 +326,12 @@ def _prepConstruction(self, cs):
for a in list(self.assemblies.values())
if not any(a.hasFlags(f) for f in assemsToSkip)
)
- axialExpansionChanger.expandColdDimsToHot(
- assemsToExpand,
- cs[CONF_DETAILED_AXIAL_EXPANSION],
- )
+ axialExpander = getPluginManagerOrFail().hook.getAxialExpansionChanger()
+ if axialExpander is not None:
+ axialExpander.expandColdDimsToHot(
+ assemsToExpand,
+ cs[CONF_DETAILED_AXIAL_EXPANSION],
+ )
getPluginManagerOrFail().hook.afterConstructionOfAssemblies(
assemblies=self.assemblies.values(), cs=cs
@@ -479,7 +481,7 @@ def _resolveNuclides(self, cs):
),
]
],
- tablefmt="plain",
+ tableFmt="plain",
),
single=True,
)
diff --git a/armi/reactor/blueprints/assemblyBlueprint.py b/armi/reactor/blueprints/assemblyBlueprint.py
index 8e13d1fcb..d7ff66f4f 100644
--- a/armi/reactor/blueprints/assemblyBlueprint.py
+++ b/armi/reactor/blueprints/assemblyBlueprint.py
@@ -15,12 +15,10 @@
"""
This module defines the blueprints input object for assemblies.
-In addition to defining the input format, the ``AssemblyBlueprint`` class is responsible
-for constructing ``Assembly`` objects. An attempt has been made to decouple ``Assembly``
-construction from the rest of ARMI as much as possible. For example, an assembly does
-not require a reactor to be constructed, or a geometry file (but uses contained Block
-geometry type as a surrogate).
-
+In addition to defining the input format, the ``AssemblyBlueprint`` class is responsible for
+constructing ``Assembly`` objects. An attempt has been made to decouple ``Assembly`` construction
+from the rest of ARMI as much as possible. For example, an assembly does not require a reactor or a
+geometry file to be constructed (it uses the contained Block geometry type as a surrogate).
"""
import yamlize
@@ -46,8 +44,8 @@ def _configureAssemblyTypes():
class Modifications(yamlize.Map):
"""
- The names of material modifications and lists of the modification values for
- each block in the assembly.
+ The names of material modifications and lists of the modification values for each block in the
+ assembly.
"""
key_type = yamlize.Typed(str)
@@ -55,10 +53,7 @@ class Modifications(yamlize.Map):
class ByComponentModifications(yamlize.Map):
- """
- The name of a component within the block and an associated Modifications
- object.
- """
+ """The name of a component within the block and an associated Modifications object."""
key_type = yamlize.Typed(str)
value_type = Modifications
@@ -68,35 +63,36 @@ class MaterialModifications(yamlize.Map):
"""
A yamlize map for reading and holding material modifications.
- A user may specify material modifications directly
- as keys/values on this class, in which case these material modifications will
- be blanket applied to the entire block.
+ A user may specify material modifications directly as keys/values on this class, in which case
+ these material modifications will be blanket applied to the entire block.
- If the user wishes to specify material modifications specific to a component
- within the block, they should use the `by component` attribute, specifying
- the keys/values underneath the name of a specific component in the block.
+ If the user wishes to specify material modifications specific to a component within the block,
+ they should use the `by component` attribute, specifying the keys/values underneath the name of
+ a specific component in the block.
.. impl:: User-impact on material definitions.
:id: I_ARMI_MAT_USER_INPUT0
:implements: R_ARMI_MAT_USER_INPUT
- Defines a yaml map attribute for the assembly portion of the blueprints
- (see :py:class:`~armi.blueprints.assemblyBlueprint.AssemblyBlueprint`) that
- allows users to specify material attributes as lists corresponding to
- each axial block in the assembly. Two types of specifications can be made:
-
- 1. Key-value pairs can be specified directly, where the key is the
- name of the modification and the value is the list of block values.
-
- 2. The "by component" attribute can be used, in which case the user
- can specify material attributes that are specific to individual components
- in each block. This is enabled through the :py:class:`~armi.reactor.blueprints.assemblyBlueprint.ByComponentModifications`
- class, which basically just allows for one additional layer of attributes
- corresponding to the component names.
-
- These material attributes can be used during the resolution of material
- classes during core instantiation (see :py:meth:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint.construct`
- and :py:meth:`~armi.reactor.blueprints.componentBlueprint.ComponentBlueprint.construct`).
+ Defines a yaml map attribute for the assembly portion of the blueprints (see
+ :py:class:`~armi.blueprints.assemblyBlueprint.AssemblyBlueprint`) that allows users to
+ specify material attributes as lists corresponding to each axial block in the assembly. Two
+ types of specifications can be made:
+
+ 1. Key-value pairs can be specified directly, where the key is the name of the
+ modification and the value is the list of block values.
+
+ 2. The "by component" attribute can be used, in which case the user can specify material
+ attributes that are specific to individual components in each block. This is enabled
+ through the
+ :py:class:`~armi.reactor.blueprints.assemblyBlueprint.ByComponentModifications` class,
+ which basically just allows for one additional layer of attributes corresponding to the
+ component names.
+
+ These material attributes can be used during the resolution of material classes during core
+ instantiation (see
+ :py:meth:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint.construct` and
+ :py:meth:`~armi.reactor.blueprints.componentBlueprint.ComponentBlueprint.construct`).
"""
key_type = yamlize.Typed(str)
@@ -209,7 +205,7 @@ def _constructAssembly(self, cs, blueprint):
a.p.flags = flags
# set a basic grid with the right number of blocks with bounds to be adjusted.
- a.spatialGrid = grids.axialUnitGrid(len(blocks))
+ a.spatialGrid = grids.AxialGrid.fromNCells(len(blocks))
a.spatialGrid.armiObject = a
# TODO: Remove mesh points from blueprints entirely. Submeshing should be
@@ -254,9 +250,7 @@ def _shouldMaterialModiferBeApplied(value) -> bool:
bool
Result of the check
"""
- if value != "" and value is not None:
- return True
- return False
+ return bool(value != "" and value is not None)
def _createBlock(self, cs, blueprint, bDesign, axialIndex):
"""Create a block based on the block design and the axial index."""
diff --git a/armi/reactor/blueprints/blockBlueprint.py b/armi/reactor/blueprints/blockBlueprint.py
index 7c11ded56..f233fd58a 100644
--- a/armi/reactor/blueprints/blockBlueprint.py
+++ b/armi/reactor/blueprints/blockBlueprint.py
@@ -48,25 +48,24 @@ class BlockBlueprint(yamlize.KeyedList):
:id: I_ARMI_BP_BLOCK
:implements: R_ARMI_BP_BLOCK
- Defines a yaml construct that allows the user to specify attributes of a
- block from within their blueprints file, including a name, flags, a radial
- grid to specify locations of pins, and the name of a component which
- drives the axial expansion of the block (see :py:mod:`~armi.reactor.converters.axialExpansionChanger`).
+ Defines a yaml construct that allows the user to specify attributes of a block from within
+ their blueprints file, including a name, flags, a radial grid to specify locations of pins,
+ and the name of a component which drives the axial expansion of the block (see
+ :py:mod:`~armi.reactor.converters.axialExpansionChanger`).
- In addition, the user may specify key-value pairs to specify the components
- contained within the block, where the keys are component names and the
- values are component blueprints (see :py:class:`~armi.reactor.blueprints.ComponentBlueprint.ComponentBlueprint`).
+ In addition, the user may specify key-value pairs to specify the components contained within
+ the block, where the keys are component names and the values are component blueprints (see
+ :py:class:`~armi.reactor.blueprints.ComponentBlueprint.ComponentBlueprint`).
- Relies on the underlying infrastructure from the ``yamlize`` package for
- reading from text files, serialization, and internal storage of the data.
+ Relies on the underlying infrastructure from the ``yamlize`` package for reading from text
+ files, serialization, and internal storage of the data.
- Is implemented into a blueprints file by being imported and used
- as an attribute within the larger :py:class:`~armi.reactor.blueprints.Blueprints`
- class.
+ Is implemented into a blueprints file by being imported and used as an attribute within the
+ larger :py:class:`~armi.reactor.blueprints.Blueprints` class.
Includes a ``construct`` method, which instantiates an instance of
- :py:class:`~armi.reactor.blocks.Block` with the characteristics
- as specified in the blueprints.
+ :py:class:`~armi.reactor.blocks.Block` with the characteristics as specified in the
+ blueprints.
"""
item_type = componentBlueprint.ComponentBlueprint
diff --git a/armi/reactor/blueprints/componentBlueprint.py b/armi/reactor/blueprints/componentBlueprint.py
index e1404cd2b..41dcc0dce 100644
--- a/armi/reactor/blueprints/componentBlueprint.py
+++ b/armi/reactor/blueprints/componentBlueprint.py
@@ -13,7 +13,8 @@
# limitations under the License.
"""
-This module defines the ARMI input for a component definition, and code for constructing an ARMI ``Component``.
+This module defines the ARMI input for a component definition, and code for constructing an ARMI
+``Component``.
Special logic is required for handling component links.
"""
@@ -69,11 +70,11 @@ def from_yaml(cls, loader, node, _rtd=None):
@classmethod
def to_yaml(cls, dumper, self, _rtd=None):
"""
- Override the ``Yamlizable.to_yaml`` to remove the object-like behavior, otherwise we'd end up with a
- ``{value: ...}`` dictionary.
+ Override the ``Yamlizable.to_yaml`` to remove the object-like behavior, otherwise we'd end
+ up with a ``{value: ...}`` dictionary.
- This allows someone to programmatically edit the component dimensions without using the ``ComponentDimension``
- class.
+ This allows someone to programmatically edit the component dimensions without using the
+ ``ComponentDimension`` class.
"""
if not isinstance(self, cls):
self = cls(self)
@@ -117,36 +118,36 @@ def __hash__(self):
class ComponentBlueprint(yamlize.Object):
"""
- This class defines the inputs necessary to build ARMI component objects. It uses ``yamlize`` to enable serialization
- to and from YAML.
+ This class defines the inputs necessary to build ARMI component objects. It uses ``yamlize`` to
+ enable serialization to and from YAML.
.. impl:: Construct component from blueprint file.
:id: I_ARMI_BP_COMP
:implements: R_ARMI_BP_COMP
- Defines a yaml construct that allows the user to specify attributes of a
- component from within their blueprints file, including a name, flags, shape,
- material and/or isotopic vector, input temperature, corresponding component dimensions,
- and ID for placement in a block lattice (see :py:class:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint`).
- Component dimensions that can be defined for a given component are dependent
- on the component's ``shape`` attribute, and the dimensions defining each
- shape can be found in the :py:mod:`~armi.reactor.components` module.
+ Defines a yaml construct that allows the user to specify attributes of a component from
+ within their blueprints file, including a name, flags, shape, material and/or isotopic
+ vector, input temperature, corresponding component dimensions, and ID for placement in a
+ block lattice (see :py:class:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint`).
+ Component dimensions that can be defined for a given component are dependent on the
+ component's ``shape`` attribute, and the dimensions defining each shape can be found in the
+ :py:mod:`~armi.reactor.components` module.
- Limited validation on the inputs is performed to ensure that the component
- shape corresponds to a valid shape defined by the ARMI application.
+ Limited validation on the inputs is performed to ensure that the component shape corresponds
+ to a valid shape defined by the ARMI application.
- Relies on the underlying infrastructure from the ``yamlize`` package for
- reading from text files, serialization, and internal storage of the data.
+ Relies on the underlying infrastructure from the ``yamlize`` package for reading from text
+ files, serialization, and internal storage of the data.
- Is implemented as part of a blueprints file by being imported and used
- as an attribute within the larger :py:class:`~armi.reactor.blueprints.Blueprints`
- class. Can also be used within the :py:class:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint`
- class to enable specification of components directly within the "blocks"
- portion of the blueprint file.
+ Is implemented as part of a blueprints file by being imported and used as an attribute
+ within the larger :py:class:`~armi.reactor.blueprints.Blueprints` class. Can also be used
+ within the :py:class:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint` class to
+ enable specification of components directly within the "blocks" portion of the blueprint
+ file.
Includes a ``construct`` method, which instantiates an instance of
- :py:class:`~armi.reactor.components.component.Component` with the characteristics
- specified in the blueprints (see :need:`I_ARMI_MAT_USER_INPUT1`).
+ :py:class:`~armi.reactor.components.component.Component` with the characteristics specified
+ in the blueprints (see :need:`I_ARMI_MAT_USER_INPUT1`).
"""
name = yamlize.Attribute(type=str)
@@ -224,8 +225,69 @@ class can then use to modify the isotopics as necessary.
constructedObject = components.factory(shape, [], kwargs)
_setComponentFlags(constructedObject, self.flags, blueprint)
insertDepletableNuclideKeys(constructedObject, blueprint)
+ constructedObject.p.theoreticalDensityFrac = (
+ constructedObject.material.getTD()
+ )
+
+ # set the custom density for non-custom material components after construction
+ self.setCustomDensity(constructedObject, blueprint, matMods)
+
return constructedObject
+ def setCustomDensity(self, constructedComponent, blueprint, matMods):
+ """Apply a custom density to a material with custom isotopics but not a 'custom material'."""
+ if self.isotopics is None:
+ # No custom isotopics specified
+ return
+
+ density = blueprint.customIsotopics[self.isotopics].density
+ if density is None:
+ # Nothing to do
+ return
+
+ if density <= 0:
+ runLog.error(
+ "A zero or negative density was specified in a custom isotopics input. "
+ "This is not permitted, if a 0 density material is needed, use 'Void'. "
+ "The component is {} and the isotopics entry is {}.".format(
+ constructedComponent, self.isotopics
+ )
+ )
+ raise ValueError(
+ "A zero or negative density was specified in the custom isotopics for a component"
+ )
+
+ mat = materials.resolveMaterialClassByName(self.material)()
+ if not isinstance(mat, materials.Custom):
+ # check for some problem cases
+ if "TD_frac" in matMods.keys():
+ runLog.warning(
+ "Both TD_frac and a custom density (custom isotopics) has been specified for "
+ "material {}. The custom density will override the density calculated using "
+ "TD_frac.".format(self.material)
+ )
+ if not mat.density(Tc=self.Tinput) > 0:
+ runLog.error(
+ "A custom density has been assigned to material '{}', which has no baseline "
+ "density. Only materials with a starting density may be assigned a density. "
+ "This comes up e.g. if isotopics are assigned to 'Void'.".format(
+ self.material
+ )
+ )
+ raise ValueError(
+ "Cannot apply custom densities to materials without density."
+ )
+
+ densityRatio = density / constructedComponent.density()
+ constructedComponent.changeNDensByFactor(densityRatio)
+
+ runLog.important(
+ "A custom material density was specified in the custom isotopics for non-custom "
+ "material {}. The component density has been altered to "
+ "{}.".format(mat, constructedComponent.density()),
+ single=True,
+ )
+
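The rescaling at the end of ``setCustomDensity`` is a uniform number-density scaling; a sketch of the invariant it establishes, with the target value and component handle assumed:

    def applyCustomDensity(component, targetDensity):
        # targetDensity: hypothetical user-specified g/cc from custom isotopics.
        ratio = targetDensity / component.density()
        component.changeNDensByFactor(ratio)  # scales every nuclide number density
        # Mass density scales linearly with number densities, so afterwards
        # component.density() equals targetDensity to floating-point precision.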
def _conformKwargs(self, blueprint, matMods):
"""This method gets the relevant kwargs to construct the component."""
kwargs = {"mergeWith": self.mergeWith or "", "isotopics": self.isotopics or ""}
@@ -340,21 +402,21 @@ def insertDepletableNuclideKeys(c, blueprint):
This is called during the component construction process for each component from within
:py:meth:`~armi.reactor.blueprints.componentBlueprint.ComponentBlueprint.construct`.
- For a given initialized component, check its flags to determine if it
- has been marked as depletable. If it is, use :py:func:`~armi.nucDirectory.nuclideBases.initReachableActiveNuclidesThroughBurnChain`
- to apply the user-specifications in the "nuclide flags" section of the blueprints
- to the component such that all active isotopes and derivatives of those
- isotopes in the burn chain are initialized to have an entry in the component's
- ``numberDensities`` dictionary.
+ For a given initialized component, check its flags to determine if it has been marked as
+ depletable. If it is, use
+ :py:func:`~armi.nucDirectory.nuclideBases.initReachableActiveNuclidesThroughBurnChain` to
+ apply the user-specifications in the "nuclide flags" section of the blueprints to the
+ Component such that all active isotopes and derivatives of those isotopes in the burn chain
+ are initialized to have an entry in the component's ``numberDensities`` dictionary.
- Note that certain case settings, including ``fpModel`` and ``fpModelLibrary``,
- may trigger modifications to the active nuclides specified by the user
- in the "nuclide flags" section of the blueprints.
+ Note that certain case settings, including ``fpModel`` and ``fpModelLibrary``, may trigger
+ modifications to the active nuclides specified by the user in the "nuclide flags" section of
+ the blueprints.
Notes
-----
- This should be moved to a neutronics/depletion plugin hook but requires some
- refactoring in how active nuclides and reactors are initialized first.
+ This should be moved to a neutronics/depletion plugin hook but requires some refactoring in how
+ active nuclides and reactors are initialized first.
See Also
--------
@@ -375,8 +437,8 @@ class ComponentKeyedList(yamlize.KeyedList):
This is used within the ``components:`` main entry of the blueprints.
- This is *not* (yet) used when components are defined within a block blueprint.
- That is handled in the blockBlueprint construct method.
+ This is *not* (yet) used when components are defined within a block blueprint. That is handled
+ in the blockBlueprint construct method.
"""
item_type = ComponentBlueprint
diff --git a/armi/reactor/blueprints/gridBlueprint.py b/armi/reactor/blueprints/gridBlueprint.py
index 13d5bbb99..12455741f 100644
--- a/armi/reactor/blueprints/gridBlueprint.py
+++ b/armi/reactor/blueprints/gridBlueprint.py
@@ -14,20 +14,17 @@
"""
Input definitions for Grids.
-Grids are given names which can be referred to on other input structures
-(like core maps and pin maps).
+Grids are given names which can be referred to on other input structures (like core maps and pin
+maps).
-These are in turn interpreted into concrete things at lower levels. For
-example:
+These are in turn interpreted into concrete things at lower levels. For example:
-* Core map lattices get turned into :py:mod:`armi.reactor.grids`,
- which get set to ``core.spatialGrid``.
-* Block pin map lattices get applied to the components to provide
- some subassembly spatial details
+* Core map lattices get turned into :py:mod:`armi.reactor.grids`, which get set to
+ ``core.spatialGrid``.
+* Block pin map lattices get applied to the components to provide some subassembly spatial details.
-Lattice inputs here are floating in space. Specific dimensions
-and anchor points are handled by the lower-level objects definitions. This
-is intended to maximize lattice reusability.
+Lattice inputs here are floating in space. Specific dimensions and anchor points are handled by the
+lower-level object definitions. This is intended to maximize lattice reusability.
See Also
--------
@@ -102,23 +99,22 @@
IC MC MC OC RR
IC IC MC PC RR SH
-
"""
-import copy
from io import StringIO
-import itertools
from typing import Tuple
+import copy
+import itertools
-import numpy
+import numpy as np
import yamlize
from ruamel.yaml import scalarstring
-from armi.utils.customExceptions import InputError
+from armi import runLog
+from armi.reactor import blueprints
+from armi.reactor import geometry, grids
from armi.utils import asciimaps
+from armi.utils.customExceptions import InputError
from armi.utils.mathematics import isMonotonic
-from armi.reactor import geometry, grids
-from armi.reactor import blueprints
-from armi import runLog
class Triplet(yamlize.Object):
@@ -182,33 +178,32 @@ class GridBlueprint(yamlize.Object):
"""
A grid input blueprint.
- These directly build Grid objects and contain information about
- how to populate the Grid with child ArmiObjects for the Reactor Model.
+ These directly build Grid objects and contain information about how to populate the Grid with
+ child ArmiObjects for the Reactor Model.
- The grids get origins either from a parent block (for pin lattices)
- or from a System (for Cores, SFPs, and other components).
+ The grids get origins either from a parent block (for pin lattices) or from a System (for Cores,
+ SFPs, and other components).
.. impl:: Define a lattice map in reactor core.
:id: I_ARMI_BP_GRID
:implements: R_ARMI_BP_GRID
- Defines a yaml construct that allows the user to specify a grid
- from within their blueprints file, including a name, geometry, dimensions,
- symmetry, and a map with the relative locations of components within that grid.
+ Defines a yaml construct that allows the user to specify a grid from within their blueprints
+ file, including a name, geometry, dimensions, symmetry, and a map with the relative
+ locations of components within that grid.
- Relies on the underlying infrastructure from the ``yamlize`` package for
- reading from text files, serialization, and internal storage of the data.
+ Relies on the underlying infrastructure from the ``yamlize`` package for reading from text
+ files, serialization, and internal storage of the data.
- Is implemented as part of a blueprints file by being used in key-value pairs
- within the :py:class:`~armi.reactor.blueprints.gridBlueprint.Grid` class,
- which is imported and used as an attribute within the larger :py:class:`~armi.reactor.blueprints.Blueprints`
- class.
+ Is implemented as part of a blueprints file by being used in key-value pairs within the
+ :py:class:`~armi.reactor.blueprints.gridBlueprint.Grid` class, which is imported and used as
+ an attribute within the larger :py:class:`~armi.reactor.blueprints.Blueprints` class.
- Includes a ``construct`` method, which instantiates an instance of one
- of the subclasses of :py:class:`~armi.reactor.grids.structuredGrid.StructuredGrid`.
- This is typically called from within :py:meth:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint.construct`,
- which then also associates the individual components in the block with
- locations specifed in the grid.
+ Includes a ``construct`` method, which instantiates an instance of one of the subclasses of
+ :py:class:`~armi.reactor.grids.structuredgrid.StructuredGrid`. This is typically called from
+ within :py:meth:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint.construct`, which
+ then also associates the individual components in the block with locations specified in the
+ grid.
Attributes
----------
@@ -222,13 +217,13 @@ class GridBlueprint(yamlize.Object):
An x/y/z pitch or hex pitch with grid dimensions in cm. This is used to specify a
uniform grid, such as Cartesian or Hex. Mutually exclusive with gridBounds.
gridBounds : dict
- A dictionary containing explicit grid boundaries. Specific keys used will depend
- on the type of grid being defined. Mutually exclusive with latticeDimensions.
+ A dictionary containing explicit grid boundaries. Specific keys used will depend on the type
+ of grid being defined. Mutually exclusive with latticeDimensions.
symmetry : str
A string defining the symmetry mode of the grid
gridContents : dict
- A {(i,j): str} dictionary mapping spatialGrid indices
- in 2-D to string specifiers of what's supposed to be in the grid.
+ A {(i,j): str} dictionary mapping spatialGrid indices in 2D to string specifiers of what's
+ supposed to be in the grid.
"""
name = yamlize.Attribute(key="name", type=str)
@@ -245,10 +240,9 @@ class GridBlueprint(yamlize.Object):
)
),
)
- # gridContents is the final form of grid contents information;
- # it is set regardless of how the input is read. When writing, we attempt to
- # preserve the input mode and write ascii map if that was what was originally
- # provided.
+ # gridContents is the final form of grid contents information; it is set regardless of how the
+ # input is read. When writing, we attempt to preserve the input mode and write ascii map if that
+ # was what was originally provided.
gridContents = yamlize.Attribute(key="grid contents", type=dict, default=None)
@gridContents.validator
@@ -284,9 +278,9 @@ def __init__(
setattr this is only needed for when you want to make this object from a non-YAML
source.
- .. warning:: This is a Yamlize object, so ``__init__`` never really gets called.
- Only ``__new__`` does.
-
+ Warning
+ -------
+ This is a Yamlize object, so ``__init__`` never really gets called. Only ``__new__`` does.
"""
self.name = name
self.geom = str(geom)
@@ -299,8 +293,8 @@ def __init__(
@property
def readFromLatticeMap(self):
"""
- This is implemented as a property, since as a Yamlize object, __init__ is not
- always called and we have to lazily evaluate its default value.
+ This is implemented as a property because, as a Yamlize object, ``__init__`` is not always
+ called and we have to lazily evaluate its default value.
"""
return getattr(self, "_readFromLatticeMap", False)
@@ -318,9 +312,8 @@ def _constructSpatialGrid(self):
"""
Build spatial grid.
- If you do not enter latticeDimensions, a unit grid will be produced which must
- be adjusted to the proper dimensions (often by inspection of children) at a
- later time.
+ If you do not enter ``latticeDimensions``, a unit grid will be produced which must be adjusted
+ to the proper dimensions (often by inspection of children) at a later time.
"""
symmetry = (
geometry.SymmetryType.fromStr(self.symmetry) if self.symmetry else None
@@ -345,8 +338,8 @@ def _constructSpatialGrid(self):
)
# convert to list, otherwise it is a CommentedSeq
- theta = numpy.array(self.gridBounds["theta"])
- radii = numpy.array(self.gridBounds["r"])
+ theta = np.array(self.gridBounds["theta"])
+ radii = np.array(self.gridBounds["r"])
for lst, name in ((theta, "theta"), (radii, "radii")):
if not isMonotonic(lst, "<"):
raise InputError(
@@ -424,11 +417,12 @@ def expandToFull(self):
"""
Unfold the blueprints to represent full symmetry.
- .. note:: This relatively rudimentary, and copies entries from the
- currently-represented domain to their corresponding locations in full
- symmetry. This may not produce the desired behavior for some scenarios,
- such as when expanding fuel shuffling paths or the like. Future work may
- make this more sophisticated.
+ Notes
+ -----
+ This is relatively rudimentary, and copies entries from the currently-represented domain to
+ their corresponding locations in full symmetry. This may not produce the desired behavior
+ for some scenarios, such as when expanding fuel shuffling paths or the like. Future work may
+ make this more sophisticated.
"""
if (
geometry.SymmetryType.fromAny(self.symmetry).domain
@@ -478,8 +472,8 @@ def _readGridContents(self):
def _readGridContentsLattice(self):
"""Read an ascii map of grid contents.
- This update the gridContents attribute, which is a dict mapping grid i,j,k
- indices to textual specifiers (e.g. ``IC``))
+ This updates the gridContents attribute, which is a dict mapping grid i,j,k indices to
+ textual specifiers (e.g., ``IC``).
"""
self.readFromLatticeMap = True
symmetry = geometry.SymmetryType.fromStr(self.symmetry)
@@ -603,21 +597,21 @@ def saveToStream(stream, bluep, full=False, tryMap=False):
"""
Save the blueprints to the passed stream.
- This can save either the entire blueprints, or just the `grids:` section of the
- blueprints, based on the passed ``full`` argument. Saving just the grid
- blueprints can be useful when cobbling blueprints together with !include flags.
+ This can save either the entire blueprints, or just the `grids:` section of the blueprints,
+ based on the passed ``full`` argument. Saving just the grid blueprints can be useful when
+ cobbling blueprints together with !include flags.
.. impl:: Write a blueprint file from a blueprint object.
:id: I_ARMI_BP_TO_DB
:implements: R_ARMI_BP_TO_DB
- First makes a copy of the blueprints that are passed in. Then modifies
- any grids specified in the blueprints into a canonical lattice map style,
- if needed. Then uses the ``dump`` method that is inherent to all ``yamlize``
- subclasses to write the blueprints to the given ``stream`` object.
+ First makes a copy of the blueprints that are passed in. Then modifies any grids specified
+ in the blueprints into a canonical lattice map style, if needed. Then uses the ``dump``
+ method that is inherent to all ``yamlize`` subclasses to write the blueprints to the given
+ ``stream`` object.
- If called with the ``full`` argument, the entire blueprints is dumped.
- If not, only the grids portion is dumped.
+ If called with the ``full`` argument, the entire blueprints is dumped. If not, only the
+ grids portion is dumped.
Parameters
----------
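A minimal sketch of the grid-only save path documented above. The trimmed-down input is an
assumption; real blueprints files also define ``blocks:``, ``assemblies:``, and ``nuclide flags:``.

```python
# Sketch: round-trip only the `grids:` section, per saveToStream(full=False).
from io import StringIO

from armi.reactor import blueprints
from armi.reactor.blueprints import gridBlueprint

bp = blueprints.Blueprints.load(
    """
grids:
    core:
        geom: hex
        symmetry: third periodic
        lattice map: |
            IC
"""
)
out = StringIO()
# full=False dumps only the grids portion; tryMap=True asks the writer to
# emit an ascii lattice map where the geometry supports one.
gridBlueprint.saveToStream(out, bp, full=False, tryMap=True)
print(out.getvalue())
```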
diff --git a/armi/reactor/blueprints/isotopicOptions.py b/armi/reactor/blueprints/isotopicOptions.py
index 077db9f21..dfb6af5d9 100644
--- a/armi/reactor/blueprints/isotopicOptions.py
+++ b/armi/reactor/blueprints/isotopicOptions.py
@@ -387,21 +387,18 @@ def apply(self, material):
Parameters
----------
- material : Material
+ material : armi.materials.material.Material
An ARMI Material instance.
"""
material.massFrac = dict(self.massFracs)
if self.density is not None:
if not isinstance(material, materials.Custom):
- runLog.warning(
- "You either specified a custom mass density or number densities "
- "(which implies a mass density) on `{}` with custom isotopics `{}`. "
- "This has no effect on this Material class; you can only "
- "override mass density on `Custom` "
- "materials. Consider switching to number fraction input. "
- "Continuing to use {} mass density.".format(
- material, self.name, material
- )
+ runLog.important(
+ "A custom density or number densities has been specified for non-custom "
+ "material {}. The material object's density will not be updated to prevent unintentional "
+ "density changes across the model. Only custom materials may have a density "
+ "specified.".format(material),
+ single=True,
)
# specifically, non-Custom materials only use refDensity and dLL, mat.customDensity has no effect
return
@@ -427,7 +424,7 @@ def apply(self, material, customIsotopicsName):
Parameters
----------
- material : Material
+ material : armi.materials.material.Material
Material instance to adjust.
 customIsotopicsName : str
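A hedged sketch of the behavior change above: ``apply`` always copies the mass fractions, but the
density override now only logs once (via ``runLog.important(..., single=True)``) for non-``Custom``
materials. The blocks-free input and the package-level material exports are assumptions.

```python
from armi import materials
from armi.reactor import blueprints

bp = blueprints.Blueprints.load(
    """
nuclide flags:
    U238: {burn: false, xs: true}
    U235: {burn: false, xs: true}
custom isotopics:
    depleted uranium:
        input format: mass fractions
        U238: 0.997
        U235: 0.003
        density: 19.1
"""
)
iso = bp.customIsotopics["depleted uranium"]

uzr = materials.UZr()
iso.apply(uzr)  # mass fractions applied; density untouched, single log emitted

custom = materials.Custom()
iso.apply(custom)  # only Custom materials take the user-specified density
```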
diff --git a/armi/reactor/blueprints/reactorBlueprint.py b/armi/reactor/blueprints/reactorBlueprint.py
index 40e802cc5..a1481c360 100644
--- a/armi/reactor/blueprints/reactorBlueprint.py
+++ b/armi/reactor/blueprints/reactorBlueprint.py
@@ -15,18 +15,17 @@
"""
Definitions of top-level reactor arrangements like the Core (default), SFP, etc.
-See documentation of blueprints in :doc:`/user/inputs/blueprints` for more context. See
-example in :py:mod:`armi.reactor.blueprints.tests.test_reactorBlueprints`.
+See documentation of blueprints in :doc:`/user/inputs/blueprints` for more context. See example in
+:py:mod:`armi.reactor.blueprints.tests.test_reactorBlueprints`.
-This was built to replace the old system that loaded the core geometry from the
-cs['geometry'] setting. Until the geom file-based input is completely removed, this
-system will attempt to migrate the core layout from geom files. When geom files are
-used, explicitly specifying a ``core`` system will result in an error.
+This was built to replace the old system that loaded the core geometry from the ``cs['geometry']``
+setting. Until the geom file-based input is completely removed, this system will attempt to migrate
+the core layout from geom files. When geom files are used, explicitly specifying a ``core`` system
+will result in an error.
-System Blueprints are a big step in the right direction to generalize user input, but
-was still mostly adapted from the old Core layout input. As such, they still only really
-support Core-like systems. Future work should generalize the concept of "system" to more
-varied scenarios.
+System Blueprints are a big step in the right direction to generalize user input, but was still
+mostly adapted from the old Core layout input. As such, they still only really support Core-like
+systems. Future work should generalize the concept of "system" to more varied scenarios.
See Also
--------
@@ -34,7 +33,6 @@
armi.reactor.systemLayoutInput.SystemLayoutInput : Deprecated method for reading the individual
face-map xml files.
"""
-import tabulate
import yamlize
from armi import context
@@ -43,6 +41,7 @@
from armi.reactor import geometry
from armi.reactor import grids
from armi.reactor.blueprints.gridBlueprint import Triplet
+from armi.utils import tabulate
class SystemBlueprint(yamlize.Object):
@@ -61,17 +60,20 @@ class SystemBlueprint(yamlize.Object):
which is in turn included into the overall blueprints within
:py:class:`~armi.reactor.blueprints.Blueprints`.
- This class includes a :py:meth:`~armi.reactor.blueprints.reactorBlueprint.SystemBlueprint.construct`
- method, which is typically called from within :py:func:`~armi.reactor.reactors.factory`
- during the initialization of the reactor object to instantiate the core
- and/or spent fuel pool objects. During that process, a spatial grid is
- constructed based on the grid blueprints specified in the "grids" section
- of the blueprints (see :need:`I_ARMI_BP_GRID`) and the assemblies needed
- to fill the lattice are built from blueprints using :py:meth:`~armi.reactor.blueprints.Blueprints.constructAssem`.
-
- .. note:: We use string keys to link grids to objects that use them. This differs
- from how blocks/assembies are specified, which use YAML anchors. YAML anchors
- have proven to be problematic and difficult to work with
+ This class includes a
+ :py:meth:`~armi.reactor.blueprints.reactorBlueprint.SystemBlueprint.construct` method, which
+ is typically called from within :py:func:`~armi.reactor.reactors.factory` during the
+ initialization of the reactor object to instantiate the core and/or spent fuel pool objects.
+ During that process, a spatial grid is constructed based on the grid blueprints specified in
+ the "grids" section of the blueprints (see :need:`I_ARMI_BP_GRID`) and the assemblies needed
+ to fill the lattice are built from blueprints using
+ :py:meth:`~armi.reactor.blueprints.Blueprints.constructAssem`.
+
+ Notes
+ -----
+ We use string keys to link grids to objects that use them. This differs from how blocks/
+ assemblies are specified, which use YAML anchors. YAML anchors have proven to be problematic and
+ difficult to work with.
"""
name = yamlize.Attribute(key="name", type=str)
@@ -94,11 +96,10 @@ def __init__(self, name=None, gridName=None, origin=None):
@staticmethod
def _resolveSystemType(typ: str):
- # Loop over all plugins that could be attached and determine if any
- # tell us how to build a specific systems attribute. Sub-optimial
- # as this check is called for each system (e.g., core, spent fuel pool).
- # It is assumed that the number of systems is currently low enough to justify
- # this structure.
+ # Loop over all plugins that could be attached and determine if any tell us how to build a
+ # specific system's attribute. Sub-optimal, as this check is called for each system (e.g.,
+ # core, spent fuel pool). It is assumed that the number of systems is currently low enough
+ # to justify this structure.
manager = getPluginManagerOrFail()
@@ -106,9 +107,8 @@ def _resolveSystemType(typ: str):
seen = set()
for options in manager.hook.defineSystemBuilders():
for key, builder in options.items():
- # Take the first match we find. This would allow other plugins to
- # define a new core builder before finding those defined by the
- # ReactorPlugin
+ # Take the first match we find. This would allow other plugins to define a new core
+ # builder before finding those defined by the ReactorPlugin
if key == typ:
return builder
seen.add(key)
@@ -144,7 +144,7 @@ def construct(self, cs, bp, reactor, geom=None, loadAssems=True):
"""
from armi.reactor import reactors # avoid circular import
- runLog.info("Constructing the `{}`".format(self.name))
+ runLog.info(f"Constructing the `{self.name}`")
if geom is not None and self.name == "core":
gridDesign = geom.toGridBlueprints("core")[0]
@@ -196,9 +196,7 @@ def construct(self, cs, bp, reactor, geom=None, loadAssems=True):
return system
def _loadAssemblies(self, cs, container, gridContents, bp):
- runLog.header(
- "=========== Adding Assemblies to {} ===========".format(container)
- )
+ runLog.header(f"=========== Adding Assemblies to {container} ===========")
badLocations = set()
for locationInfo, aTypeID in gridContents.items():
newAssembly = bp.constructAssem(cs, specifier=aTypeID)
@@ -232,9 +230,7 @@ def _modifyGeometry(self, container, gridDesign):
# (unless specified on input)
if not gridDesign.latticeDimensions:
runLog.info(
- "Updating spatial grid pitch data for {} geometry".format(
- container.geomType
- )
+ f"Updating spatial grid pitch data for {container.geomType} geometry"
)
if container.geomType == geometry.GeomType.HEX:
container.spatialGrid.changePitch(container[0][0].getPitch())
@@ -258,27 +254,22 @@ def summarizeMaterialData(container):
Any Core object with Blocks and Components defined.
"""
runLog.header(
- "=========== Summarizing Source of Material Data for {} ===========".format(
- container
- )
+ f"=========== Summarizing Source of Material Data for {container} ==========="
)
materialNames = set()
materialData = []
for c in container.iterComponents():
if c.material.name in materialNames:
continue
- materialData.append((c.material.name, c.material.DATA_SOURCE, False))
+ materialData.append((c.material.name, c.material.DATA_SOURCE))
materialNames.add(c.material.name)
+
materialData = sorted(materialData)
runLog.info(
tabulate.tabulate(
- tabular_data=materialData,
- headers=[
- "Material Name",
- "Source Location",
- "Property Data was Modified\nfrom the Source?",
- ],
- tablefmt="armi",
+ data=materialData,
+ headers=["Material Name", "Source Location"],
+ tableFmt="armi",
)
)
return materialData
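Callers of ``summarizeMaterialData`` now receive two-tuples instead of three; a short usage sketch,
where ``core`` stands in for a hypothetical constructed ``Core`` object:

```python
from armi.reactor.blueprints import reactorBlueprint

# data is a sorted list of (materialName, dataSource) pairs,
# e.g. [("Custom", "ARMI"), ("HT9", "ARMI"), ("UZr", "ARMI")]
data = reactorBlueprint.summarizeMaterialData(core)
for name, source in data:
    print(f"{name:20s} {source}")
```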
diff --git a/armi/reactor/blueprints/tests/test_blockBlueprints.py b/armi/reactor/blueprints/tests/test_blockBlueprints.py
index a06964b59..6215bee46 100644
--- a/armi/reactor/blueprints/tests/test_blockBlueprints.py
+++ b/armi/reactor/blueprints/tests/test_blockBlueprints.py
@@ -332,7 +332,6 @@ def test_explicitFlags(self):
self.assertTrue(a1.hasFlags(Flags.FUEL, exact=True))
self.assertTrue(a2.hasFlags(Flags.FUEL | Flags.TEST, exact=True))
- # TODO: This test passes, but shouldn't.
def test_densityConsistentWithComponentConstructor(self):
a1 = self.blueprints.assemDesigns.bySpecifier["IC"].construct(
self.cs, self.blueprints
diff --git a/armi/reactor/blueprints/tests/test_blueprints.py b/armi/reactor/blueprints/tests/test_blueprints.py
index 596816371..d63c12467 100644
--- a/armi/reactor/blueprints/tests/test_blueprints.py
+++ b/armi/reactor/blueprints/tests/test_blueprints.py
@@ -35,19 +35,10 @@
class TestBlueprints(unittest.TestCase):
- """Test that the basic functionality of faithfully receiving user input to construct
- ARMI data model objects works as expected.
+ """Test that the basic functionality of faithfully receiving user input to construct ARMI data
+ model objects works as expected.
- Values are hopefully not hardcoded in here, just sanity checks that nothing messed
- up as this is code has VERY high incidental coverage from other tests.
-
- NOTE: as it stands it seems a little hard to test more granularity with the
- blueprints file as each initialization is intended to be a complete load from the
- input file, and each load also
- makes calls out to the reactor for some assembly initialization steps.
-
- TODO: see the above note, and try to test blueprints on a wider range of input
- files, touching on each failure case.
+ Try to ensure you test for ideas and not exact matches here, to make the tests more robust.
"""
@classmethod
diff --git a/armi/reactor/blueprints/tests/test_customIsotopics.py b/armi/reactor/blueprints/tests/test_customIsotopics.py
index 3366a3c36..4c341b45b 100644
--- a/armi/reactor/blueprints/tests/test_customIsotopics.py
+++ b/armi/reactor/blueprints/tests/test_customIsotopics.py
@@ -14,6 +14,7 @@
"""Unit test custom isotopics."""
import unittest
+from logging import DEBUG
import yamlize
@@ -22,10 +23,12 @@
from armi.reactor import blueprints
from armi.reactor.blueprints import isotopicOptions
from armi.reactor.flags import Flags
+from armi import runLog
+from armi.tests import mockRunLogs
class TestCustomIsotopics(unittest.TestCase):
- yamlString = r"""
+ yamlPreamble = r"""
nuclide flags:
U238: {burn: true, xs: true}
U235: {burn: true, xs: true}
@@ -75,15 +78,6 @@ class TestCustomIsotopics(unittest.TestCase):
U234: 0.000054
density: 19.1
- # >>> from armi.nucDirectory import elements, nuclideBases
- # >>> import numpy
- # >>> u = elements.bySymbol['U']
- # >>> w_i = numpy.array([n.abundance for n in u.getNaturalIsotopics()])
- # >>> Mi = numpy.array([n.weight for n in u.getNaturalIsotopics()])
- # >>> Ni = w_i * 19.1 * 6.0221e23 / Mi
- # >>> N_norm = Ni / sum(Ni)
- # >>> N_norm.round(6)
- # array([ 5.50000000e-05, 7.29500000e-03, 9.92650000e-01])
uranium isotopic number fractions:
input format: number fractions
U238: 0.992650
@@ -91,21 +85,26 @@ class TestCustomIsotopics(unittest.TestCase):
U234: 0.000055
density: 19.1
- # >>> from armi.nucDirectory import elements, nuclideBases
- # >>> import numpy
- # >>> u = elements.bySymbol['U']
- # >>> Mi = numpy.array([n.weight for n in u.getNaturalIsotopics()])
- # >>> w_i = numpy.array([n.abundance for n in u.getNaturalIsotopics()])
- # >>> Ni = 19.1 * w_i * 6.0221e23 / Mi
- # array([ 2.65398007e+18, 3.52549755e+20, 4.79692055e+22])
- # >>> for n, ni in zip(u.getNaturalIsotopics(), Ni):
- # >>> print ' {}: {:.7e}'.format(n.name, ni) # requires 7 decimal places!
uranium isotopic number densities: &u_isotopics
input format: number densities
U234: 2.6539102e-06
U235: 3.5254048e-04
U238: 4.7967943e-02
+ bad uranium isotopic mass fractions:
+ input format: mass fractions
+ U238: 0.992742
+ U235: 0.007204
+ U234: 0.000054
+ density: 0
+
+ negative uranium isotopic mass fractions:
+ input format: mass fractions
+ U238: 0.992742
+ U235: 0.007204
+ U234: 0.000054
+ density: -1
+
linked uranium number densities: *u_isotopics
steel:
@@ -114,6 +113,9 @@ class TestCustomIsotopics(unittest.TestCase):
C: 0.3
density: 7.0
+"""
+
+ yamlGoodBlocks = r"""
blocks:
uzr fuel: &block_0
fuel: &basic_fuel
@@ -163,7 +165,72 @@ class TestCustomIsotopics(unittest.TestCase):
material: Custom
isotopics: linked uranium number densities
- steel: &block_6
+ fuel with no modifications: &block_6 # after a custom density has been set
+ fuel:
+ <<: *basic_fuel
+
+ overspecified fuel: &block_7
+ fuel:
+ <<: *basic_fuel
+ material: UraniumOxide
+ isotopics: uranium isotopic number densities
+
+ density set via number density: &block_8
+ fuel:
+ <<: *basic_fuel
+ isotopics: uranium isotopic number densities
+
+ steel: &block_9
+ clad:
+ shape: Hexagon
+ material: Custom
+ isotopics: steel
+ Tinput: 25.0
+ Thot: 600.0
+ ip: 0.0
+ mult: 169.0
+ op: 0.86602
+
+assemblies:
+ fuel a: &assembly_a
+ specifier: IC
+ blocks: [*block_0, *block_1, *block_2, *block_3, *block_4, *block_5, *block_6, *block_7, *block_8, *block_9]
+ height: [10, 10, 10, 10, 10, 10, 10, 10, 10, 10]
+ axial mesh points: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+ xs types: [A, A, A, A, A, A, A, A, A, A]
+ material modifications:
+ TD_frac: ["", "", "", "", "", "", "", 0.1, "", ""]
+
+"""
+
+ yamlBadBlocks = r"""
+blocks:
+ uzr fuel: &block_0
+ fuel: &basic_fuel
+ shape: Hexagon
+ material: UZr
+ Tinput: 25.0
+ Thot: 600.0
+ ip: 0.0
+ mult: 1.0
+ op: 10.0
+
+ clad:
+ shape: Circle
+ material: HT9
+ Tinput: 25.0
+ Thot: 600.0
+ id: 0.0
+ mult: 1.0
+ od: 10.0
+
+ custom void: &block_1
+ fuel:
+ <<: *basic_fuel
+ material: Void
+ isotopics: uranium isotopic number densities
+
+ steel: &block_2
clad:
shape: Hexagon
material: Custom
@@ -174,16 +241,65 @@ class TestCustomIsotopics(unittest.TestCase):
mult: 169.0
op: 0.86602
+ no density uo2: &block_3
+ fuel:
+ <<: *basic_fuel
+ material: UraniumOxide
+ isotopics: uranium isotopic number densities
+
+ zero density uo2: &block_4
+ fuel:
+ <<: *basic_fuel
+ material: UraniumOxide
+ isotopics: bad uranium isotopic mass fractions
+
+ negative density uo2: &block_5
+ fuel:
+ <<: *basic_fuel
+ material: UraniumOxide
+ isotopics: negative uranium isotopic mass fractions
assemblies:
fuel a: &assembly_a
specifier: IC
- blocks: [*block_0, *block_1, *block_2, *block_3, *block_4, *block_5, *block_6]
- height: [10, 10, 10, 10, 10, 10,10]
- axial mesh points: [1, 1, 1, 1, 1, 1,1]
- xs types: [A, A, A, A, A, A,A]
+ blocks: [*block_0, *block_1, *block_2]
+ height: [10, 10, 10]
+ axial mesh points: [1, 1, 1]
+ xs types: [A, A, A]
+ material modifications:
+ TD_frac: ["", "", ""]
+
+ fuel b: &assembly_b
+ specifier: IC
+ blocks: [*block_0, *block_3, *block_2]
+ height: [10, 10, 10]
+ axial mesh points: [1, 1, 1]
+ xs types: [A, A, A]
+ material modifications:
+ TD_frac: ["", "0.0", ""] # set density to 0 to cause error in custom density
+
+ fuel c: &assembly_c
+ specifier: IC
+ blocks: [*block_0, *block_4, *block_2]
+ height: [10, 10, 10]
+ axial mesh points: [1, 1, 1]
+ xs types: [A, A, A]
+
+ fuel d: &assembly_d
+ specifier: IC
+ blocks: [*block_0, *block_5, *block_2]
+ height: [10, 10, 10]
+ axial mesh points: [1, 1, 1]
+ xs types: [A, A, A]
+
"""
+
+ # this yaml is supposed to successfully build
+ yamlString = yamlPreamble + yamlGoodBlocks
+
+ # This yaml is designed to raise an error when built
+ yamlStringWithError = yamlPreamble + yamlBadBlocks
""":meta hide-value:"""
@classmethod
@@ -216,18 +332,89 @@ def test_massFractionsAreApplied(self):
:id: T_ARMI_MAT_USER_INPUT3
:tests: R_ARMI_MAT_USER_INPUT
"""
- fuel0 = self.a[0].getComponent(Flags.FUEL)
fuel1 = self.a[1].getComponent(Flags.FUEL)
fuel2 = self.a[2].getComponent(Flags.FUEL)
self.assertEqual(self.numCustomNuclides, len(fuel1.p.numberDensities))
self.assertAlmostEqual(19.1, fuel1.density())
- # density only works with a Custom material type.
- self.assertAlmostEqual(fuel0.density(), fuel2.density())
self.assertEqual(
set(fuel2.p.numberDensities.keys()), set(fuel1.p.numberDensities.keys())
) # keys are same
+ def test_densitiesAppliedToNonCustomMaterials(self):
+ """Ensure that a density can be set in custom isotopics for components using library materials."""
+ # The template block
+ fuel0 = self.a[0].getComponent(Flags.FUEL)
+ # The block with custom density but not the 'Custom' material
+ fuel2 = self.a[2].getComponent(Flags.FUEL)
+ # A block like the template block, but made after the custom block
+ fuel6 = self.a[6].getComponent(Flags.FUEL)
+ # A block with custom density set via number density
+ fuel8 = self.a[8].getComponent(Flags.FUEL)
+
+ # Check that the density is set correctly on the custom density block,
+ # and that it is not the same as the original
+ self.assertAlmostEqual(19.1, fuel2.density())
+ self.assertNotAlmostEqual(fuel0.density(), fuel2.density(), places=2)
+ # Check that the custom density block has the correct material
+ self.assertEqual("UZr", fuel2.material.name)
+ # Check that the block with only number densities set has a new density
+ self.assertAlmostEqual(19.1, fuel8.density())
+ # original material density should not be changed after setting a custom density component,
+ # so a new block without custom isotopics and density should have the same density as the original
+ self.assertAlmostEqual(fuel6.density(), fuel0.density())
+ self.assertEqual(fuel6.material.name, fuel0.material.name)
+ self.assertEqual("UZr", fuel0.material.name)
+
+ def test_customDensityLogsAndErrors(self):
+ """Test that the right warning messages and errors are emitted when applying custom densities."""
+ # Check for warnings when specifying both TD_frac and custom isotopics
+ with mockRunLogs.BufferLog() as mockLog:
+ # we should start with a clean slate
+ self.assertEqual("", mockLog.getStdout())
+ runLog.LOG.startLog("test_customDensityLogsAndErrors")
+ runLog.LOG.setVerbosity(DEBUG)
+
+ # rebuild the input to capture the logs
+ cs = settings.Settings()
+ cs = cs.modified(newSettings={CONF_XS_KERNEL: "MC2v2"})
+ bp = blueprints.Blueprints.load(self.yamlString)
+ bp.constructAssem(cs, name="fuel a")
+
+ # Check for log messages
+ streamVal = mockLog.getStdout()
+ self.assertIn("Both TD_frac and a custom density", streamVal, msg=streamVal)
+ self.assertIn(
+ "A custom material density was specified", streamVal, msg=streamVal
+ )
+ self.assertIn(
+ "A custom density or number densities has been specified",
+ streamVal,
+ msg=streamVal,
+ )
+
+ # Check that assigning a custom density to the Void material fails
+ cs = settings.Settings()
+ cs = cs.modified(newSettings={CONF_XS_KERNEL: "MC2v2"})
+ bp = blueprints.Blueprints.load(self.yamlStringWithError)
+ # Ensure we have some Void
+ self.assertEqual(bp.blockDesigns["custom void"]["fuel"].material, "Void")
+ # Can't have stuff in Void
+ with self.assertRaises(ValueError):
+ bp.constructAssem(cs, name="fuel a")
+
+ # Try making a 0 density non-Void material by setting TD_frac to 0.0
+ with self.assertRaises(ValueError):
+ bp.constructAssem(cs, name="fuel b")
+
+ # Try making a material with mass fractions with a density of 0
+ with self.assertRaises(ValueError):
+ bp.constructAssem(cs, name="fuel c")
+
+ # Try making a material with mass fractions with a negative density
+ with self.assertRaises(ValueError):
+ bp.constructAssem(cs, name="fuel d")
+
def test_numberFractions(self):
"""Ensure that the custom isotopics can be specified via number fractions.
diff --git a/armi/reactor/blueprints/tests/test_materialModifications.py b/armi/reactor/blueprints/tests/test_materialModifications.py
index bd7b458cb..00d45d92d 100644
--- a/armi/reactor/blueprints/tests/test_materialModifications.py
+++ b/armi/reactor/blueprints/tests/test_materialModifications.py
@@ -54,13 +54,41 @@ class TestMaterialModifications(unittest.TestCase):
xs types: [A]
"""
- boronInput = uZrInput.replace("UZr", "B")
+ b4cInput = r"""
+nuclide flags:
+ B: {burn: false, xs: true}
+ C: {burn: false, xs: true}
+blocks:
+ poison: &block_poison
+ poison:
+ shape: Hexagon
+ material: B4C
+ Tinput: 600.0
+ Thot: 600.0
+ ip: 0.0
+ mult: 1
+ op: 10.0
+assemblies:
+ assem a: &assembly_a
+ specifier: IC
+ blocks: [*block_poison]
+ height: [1.0]
+ axial mesh points: [1]
+ xs types: [A]
+"""
def loadUZrAssembly(self, materialModifications):
- yamlString = self.uZrInput + "\n" + materialModifications
+ return self._loadAssembly(self.uZrInput, materialModifications, "fuel a")
+
+ @staticmethod
+ def _loadAssembly(bpBase: str, materialModifications: str, assem: str):
+ yamlString = bpBase + "\n" + materialModifications
design = blueprints.Blueprints.load(yamlString)
design._prepConstruction(settings.Settings())
- return design.assemblies["fuel a"]
+ return design.assemblies[assem]
+
+ def loadB4CAssembly(self, materialModifications: str):
+ return self._loadAssembly(self.b4cInput, materialModifications, "assem a")
def test_noMaterialModifications(self):
a = self.loadUZrAssembly("")
@@ -306,3 +334,15 @@ def test_matModsUpTheMRO(self):
U: 1
"""
)
+
+ def test_theoreticalDensity(self):
+ """Test the theoretical density can be loaded from material modifications."""
+ mods = """
+ material modifications:
+ TD_frac: [0.5]
+ """
+ a = self.loadB4CAssembly(mods)
+ comp = a[0][0]
+ mat = comp.material
+ self.assertEqual(mat.getTD(), 0.5)
+ self.assertEqual(comp.p.theoreticalDensityFrac, 0.5)
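A hedged illustration of what this new test verifies, assuming ``adjustTD`` scales the returned
density linearly (as the B4C model is expected to):

```python
from armi.materials.b4c import B4C

mat = B4C()
rhoFull = mat.density(Tc=25.0)
mat.adjustTD(0.5)            # set theoretical density fraction to 50%
print(mat.getTD())           # 0.5
print(mat.density(Tc=25.0))  # expected ~0.5 * rhoFull
```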
diff --git a/armi/reactor/blueprints/tests/test_reactorBlueprints.py b/armi/reactor/blueprints/tests/test_reactorBlueprints.py
index c50645c52..f7d5f6180 100644
--- a/armi/reactor/blueprints/tests/test_reactorBlueprints.py
+++ b/armi/reactor/blueprints/tests/test_reactorBlueprints.py
@@ -122,11 +122,7 @@ def test_construct(self):
def test_materialDataSummary(self):
"""Test that the material data summary for the core is valid as a printout to the stdout."""
- expectedMaterialData = [
- ("Custom", "ARMI", False),
- ("HT9", "ARMI", False),
- ("UZr", "ARMI", False),
- ]
+ expectedMaterialData = [("Custom", "ARMI"), ("HT9", "ARMI"), ("UZr", "ARMI")]
core, _sfp = self._setupReactor()
materialData = reactorBlueprint.summarizeMaterialData(core)
for actual, expected in zip(materialData, expectedMaterialData):
diff --git a/armi/reactor/components/__init__.py b/armi/reactor/components/__init__.py
index ccb1b8c22..516cdae81 100644
--- a/armi/reactor/components/__init__.py
+++ b/armi/reactor/components/__init__.py
@@ -29,13 +29,13 @@
# ruff: noqa: F405
import math
-import numpy
+import numpy as np
from armi import runLog
-from armi.reactor.components.component import * # noqa: undefined-local-with-import-star
-from armi.reactor.components.basicShapes import * # noqa: undefined-local-with-import-star
-from armi.reactor.components.complexShapes import * # noqa: undefined-local-with-import-star
-from armi.reactor.components.volumetricShapes import * # noqa: undefined-local-with-import-star
+from armi.reactor.components.component import * # noqa: F403
+from armi.reactor.components.basicShapes import * # noqa: F403
+from armi.reactor.components.complexShapes import * # noqa: F403
+from armi.reactor.components.volumetricShapes import * # noqa: F403
def factory(shape, bcomps, kwargs):
@@ -132,7 +132,7 @@ def __init__(
material,
Tinput,
Thot,
- area=numpy.NaN,
+ area=np.NaN,
modArea=None,
isotopics=None,
mergeWith=None,
@@ -158,7 +158,7 @@ def getComponentArea(self, cold=False):
Parameters
----------
cold : bool, optional
- Ignored for this component
+ If True, compute the area using as-input (cold) dimensions instead of thermally-expanded ones.
"""
coldArea = self.p.area
if cold:
@@ -173,6 +173,13 @@ def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):
This is the smallest it can possibly be. Since this is used to determine
the outer component, it will never be allowed to be the outer one.
+ Parameters
+ ----------
+ Tc : float
+ Ignored for this component
+ cold : bool, optional
+ If True, compute the diameter using as-input (cold) dimensions instead of thermally-expanded ones.
+
Notes
-----
Tc is not used in this method for this particular component.
@@ -217,12 +224,12 @@ def __init__(
material,
Tinput,
Thot,
- area=numpy.NaN,
+ area=np.NaN,
op=None,
isotopics=None,
mergeWith=None,
components=None,
- volume=numpy.NaN,
+ volume=np.NaN,
):
Component.__init__(
self,
@@ -341,31 +348,39 @@ def computeVolume(self):
"""
return self._deriveVolumeAndArea()
+ def getMaxVolume(self):
+ """
+ The maximum volume of the parent Block.
+
+ Returns
+ -------
+ vol : float
+ volume in cm^3.
+ """
+ return self.parent.getMaxArea() * self.parent.getHeight()
+
def _deriveVolumeAndArea(self):
"""
Derive the volume and area of a ``DerivedShape``.
Notes
-----
- If a parent exists, this will iterate over it and then determine
- both the volume and area based on its context within the scope
- of the parent object by considering the volumes and areas of
- the surrounding components.
+ If a parent exists, this will iterate over it and then determine both the volume and area
+ based on its context within the scope of the parent object by considering the volumes and
+ areas of the surrounding components.
- Since some components are volumetric shapes, this must consider the volume
- so that it wraps around in all three dimensions.
+ Since some components are volumetric shapes, this must consider the volume so that it wraps
+ around in all three dimensions.
- But there are also situations where we need to handle zero-height blocks
- with purely 2D components. Thus we track area and volume fractions here
- when possible.
+ But there are also situations where we need to handle zero-height blocks with purely 2D
+ components. Thus we track area and volume fractions here when possible.
"""
if self.parent is None:
raise ValueError(
f"Cannot compute volume/area of {self} without a parent object."
)
- # Determine the volume/areas of the non-derived shape components
- # within the parent.
+ # Determine the volume/areas of the non-derived shape components within the parent.
siblingVolume = 0.0
siblingArea = 0.0
for sibling in self.parent.getChildren():
@@ -380,10 +395,10 @@ def _deriveVolumeAndArea(self):
try:
if siblingArea is not None:
siblingArea += sibling.getArea()
- except: # noqa: bare-except
+ except Exception:
siblingArea = None
- remainingVolume = self.parent.getMaxVolume() - siblingVolume
+ remainingVolume = self.getMaxVolume() - siblingVolume
if siblingArea:
remainingArea = self.parent.getMaxArea() - siblingArea
@@ -393,8 +408,8 @@ def _deriveVolumeAndArea(self):
f"The component areas in {self.parent} exceed the maximum "
"allowable volume based on the geometry. Check that the "
"geometry is defined correctly.\n"
- f"Maximum allowable volume: {self.parent.getMaxVolume()} cm^3\n"
- f"Volume of all non-derived shape components: {siblingVolume} cm^3\n"
+ f"Maximum allowable volume: {self.getMaxVolume()} "
+ f"cm^3\nVolume of all non-derived shape components: {siblingVolume} cm^3\n"
)
runLog.error(msg)
raise ValueError(
@@ -443,8 +458,17 @@ def getComponentArea(self, cold=False):
Parameters
----------
cold : bool, optional
- Ignored for this component
 If True, compute the area using as-input (cold) dimensions instead of thermally-expanded ones.
"""
+ if cold:
+ # At cold temp, the DerivedShape has the area of the parent minus the other siblings
+ parentArea = self.parent.getArea()
+ # NOTE: the assumption is there is only one DerivedShape in each Component
+ siblings = sum(
+ [c.getArea(cold=True) for c in self.parent if type(c) is not DerivedShape]
+ )
+ return parentArea - siblings
+
if self.parent.derivedMustUpdate:
self.computeVolume()
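The cold branch above reduces to simple bookkeeping; restated with a hypothetical helper:

```python
# Sketch of the cold-area rule: the DerivedShape gets whatever portion of the
# parent's area the non-derived siblings do not claim at as-input dimensions.
def derivedColdArea(parentArea: float, siblingColdAreas: list) -> float:
    """Return the leftover area available to the DerivedShape when cold."""
    return parentArea - sum(siblingColdAreas)

# e.g. a 100 cm^2 block with 60 + 25 cm^2 of solid components leaves 15 cm^2
print(derivedColdArea(100.0, [60.0, 25.0]))  # 15.0
```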
diff --git a/armi/reactor/components/component.py b/armi/reactor/components/component.py
index 4cebed503..e00af7860 100644
--- a/armi/reactor/components/component.py
+++ b/armi/reactor/components/component.py
@@ -20,7 +20,7 @@
import copy
import re
-import numpy
+import numpy as np
from armi import materials
from armi import runLog
@@ -30,8 +30,8 @@
from armi.materials import void
from armi.nucDirectory import nuclideBases
from armi.reactor import composites
-from armi.reactor import parameters
from armi.reactor import flags
+from armi.reactor import parameters
from armi.reactor.components import componentParameters
from armi.utils import densityTools
from armi.utils.units import C_TO_K
@@ -123,10 +123,11 @@ class ComponentType(composites.CompositeModelType):
in order to conform them to the correct format. Additionally, the constructors
arguments can be used to determine the Component subclasses dimensions.
- .. warning:: The import-time metaclass-based component subclass registration was a
- good idea, but in practice has caused significant confusion and trouble. We will
- replace this soon with an explicit plugin-based component subclass registration
- system.
+ Warning
+ -------
+ The import-time metaclass-based component subclass registration was a good idea, but in practice
+ has caused significant confusion and trouble. We will replace this soon with an explicit
+ plugin-based component subclass registration system.
"""
TYPES = dict() #: :meta hide-value:
@@ -225,6 +226,8 @@ class Component(composites.Composite, metaclass=ComponentType):
pDefs = componentParameters.getComponentParameterDefinitions()
+ material: materials.Material
+
def __init__(
self,
name,
@@ -278,7 +281,7 @@ def __lt__(self, other):
thatOD = other.getBoundingCircleOuterDiameter(cold=True)
try:
return thisOD < thatOD
- except: # noqa: bare-except
+ except Exception:
raise ValueError(
"Components 1 ({} with OD {}) and 2 ({} and OD {}) cannot be ordered because their "
"bounding circle outer diameters are not comparable.".format(
@@ -325,7 +328,7 @@ def resolveLinkedDims(self, components):
comp = components[name]
linkedKey = match.group(2)
self.p[dimName] = _DimensionLink((comp, linkedKey))
- except: # noqa: bare-except
+ except Exception:
if value.count(".") > 1:
raise ValueError(
"Component names should not have periods in them: `{}`".format(
@@ -549,11 +552,10 @@ def _checkNegativeArea(self, area, cold):
which may be placed between components that will overlap during thermal expansion
(such as liners and cladding and annular fuel).
- Overlapping is allowed to maintain conservation of atoms while sticking close
- to the as-built geometry. Modules that need true geometries will have to
- handle this themselves.
+ Overlapping is allowed to maintain conservation of atoms while sticking close to the
+ as-built geometry. Modules that need true geometries will have to handle this themselves.
"""
- if numpy.isnan(area):
+ if np.isnan(area):
return
if area < 0.0:
@@ -576,7 +578,7 @@ def _checkNegativeVolume(self, volume):
--------
self._checkNegativeArea
"""
- if numpy.isnan(volume):
+ if np.isnan(volume):
return
if volume < 0.0 and self.containsSolidMaterial():
@@ -1097,7 +1099,8 @@ def mergeNuclidesInto(self, compToMergeWith):
"""
Set another component's number densities to reflect this one merged into it.
- You must also modify the geometry of the other component and remove this component to conserve atoms.
+ You must also modify the geometry of the other component and remove this component to
+ conserve atoms.
"""
# record pre-merged number densities and areas
aMe = self.getArea()
@@ -1133,11 +1136,11 @@ def backUp(self):
self._restoreLinkedDims(linkedDims)
def restoreBackup(self, paramsToApply):
- r"""
+ """
 Restore the parameters from a previously created backup.
- This needed to be overridden due to linked components which actually have a parameter value of another
- ARMI component.
+ This needed to be overridden due to linked components which actually have a parameter value
+ of another ARMI component.
"""
linkedDims = self._getLinkedDimsAndValues()
composites.Composite.restoreBackup(self, paramsToApply)
@@ -1147,12 +1150,12 @@ def _getLinkedDimsAndValues(self):
linkedDims = []
for dimName in self.DIMENSION_NAMES:
- # backUp and restore are called in tight loops, getting the value and
- # checking here is faster than calling self.dimensionIsLinked because that
- # requires and extra p.__getitem__
+ # backUp and restore are called in tight loops, getting the value and checking here is
+ # faster than calling self.dimensionIsLinked because that requires an extra
+ # p.__getitem__
try:
val = self.p[dimName]
- except: # noqa: bare-except
+ except Exception:
raise RuntimeError(
"Could not find parameter {} defined for {}. Is the desired "
"Component class?".format(dimName, self)
@@ -1223,14 +1226,13 @@ def getIntegratedMgFlux(self, adjoint=False, gamma=False):
"""
Return the multigroup neutron tracklength in [n-cm/s].
- The first entry is the first energy group (fastest neutrons). Each additional
- group is the next energy group, as set in the ISOTXS library.
+ The first entry is the first energy group (fastest neutrons). Each additional group is the
+ next energy group, as set in the ISOTXS library.
Parameters
----------
adjoint : bool, optional
Return adjoint flux instead of real
-
gamma : bool, optional
Whether to return the neutron flux or the gamma flux.
@@ -1241,7 +1243,8 @@ def getIntegratedMgFlux(self, adjoint=False, gamma=False):
if self.p.pinNum is None:
# no pin-level flux is available
if not self.parent:
- return numpy.zeros(1)
+ return np.zeros(1)
+
volumeFraction = self.getVolume() / self.parent.getVolume()
return volumeFraction * self.parent.getIntegratedMgFlux(adjoint, gamma)
@@ -1256,15 +1259,20 @@ def getIntegratedMgFlux(self, adjoint=False, gamma=False):
pinFluxes = self.parent.p.pinMgFluxesAdj
else:
pinFluxes = self.parent.p.pinMgFluxes
+
return pinFluxes[self.p.pinNum - 1] * self.getVolume()
- def density(self):
+ def density(self) -> float:
"""Returns the mass density of the object in g/cc."""
density = composites.Composite.density(self)
- if not density:
- # possible that there are no nuclides in this component yet. In that case, defer to Material.
- density = self.material.density(Tc=self.temperatureInC)
+ if not density and not isinstance(self.material, void.Void):
+ # possible that there are no nuclides in this component yet. In that case,
+ # defer to Material. Material.density is wrapped to warn if it's attached
+ # to a parent. Avoid that by calling the inner function directly
+ density = self.material.density.__wrapped__(
+ self.material, Tc=self.temperatureInC
+ )
return density
@@ -1295,9 +1303,8 @@ def getPitchData(self):
Notes
-----
- This pitch data should only be used if this is the pitch defining component in
- a block. The block is responsible for determining which component in it is the
- pitch defining component.
+ This pitch data should only be used if this is the pitch defining component in a block. The
+ block is responsible for determining which component in it is the pitch defining component.
"""
raise NotImplementedError(
f"Method not implemented on component {self}. "
@@ -1308,6 +1315,21 @@ def getFuelMass(self) -> float:
"""Return the mass in grams if this is a fueled component."""
return self.getMass() if self.hasFlags(flags.Flags.FUEL) else 0.0
+ def finalizeLoadingFromDB(self):
+ """Apply any final actions after creating the component from database.
+
+ This should **only** be called internally by the database loader. Otherwise
+ some properties could be doubly applied.
+
+ This exists because the theoretical density is initially defined as a material
+ modification, and then stored as a Material attribute. When reading from blueprints,
+ the blueprint loader sets the theoretical density parameter from the Material
+ attribute. Component parameters are also set when reading from the database.
+ But, we need to set the Material attribute so routines that fetch a material's
+ density property account for the theoretical density.
+ """
+ self.material.adjustTD(self.p.theoreticalDensityFrac)
+
class ShapedComponent(Component):
"""A component with well-defined dimensions."""
diff --git a/armi/reactor/components/componentParameters.py b/armi/reactor/components/componentParameters.py
index 8bd153a6a..14292b126 100644
--- a/armi/reactor/components/componentParameters.py
+++ b/armi/reactor/components/componentParameters.py
@@ -120,6 +120,24 @@ def getComponentParameterDefinitions():
description="Pin number of this component in some mesh. Starts at 1.",
default=None,
)
+
+ def _assignTDFrac(self, val):
+ if val > 1 or val < 0:
+ raise ValueError(
+ f"Theoretical density fraction must be in range [0,1], got {val}"
+ )
+ self._p_theoreticalDensityFrac = val
+
+ pb.defParam(
+ "theoreticalDensityFrac",
+ description=(
+ "Fractional value between zero and one, inclusive, for the theoretical density "
+ "of the material stored on this component."
+ ),
+ units=units.UNITLESS,
+ default=1,
+ setter=_assignTDFrac,
+ )
return pDefs
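A sketch of the bounds check the new setter enforces; ``comp`` is a hypothetical ``Component``
instance.

```python
comp.p.theoreticalDensityFrac = 0.95     # accepted: inside [0, 1]
try:
    comp.p.theoreticalDensityFrac = 1.5  # rejected at assignment time
except ValueError as err:
    print(err)  # Theoretical density fraction must be in range [0,1], got 1.5
```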
diff --git a/armi/reactor/composites.py b/armi/reactor/composites.py
index f0a7baf4e..93439c0a1 100644
--- a/armi/reactor/composites.py
+++ b/armi/reactor/composites.py
@@ -36,26 +36,22 @@
import itertools
import operator
import timeit
-from typing import Dict, Optional, Type, Tuple, List, Union
+from typing import Dict, List, Optional, Tuple, Type, Union
-import numpy
+import numpy as np
import six
-import tabulate
-
-from armi import context
-from armi import runLog
-from armi import utils
-from armi.nucDirectory import elements
-from armi.nucDirectory import nucDir, nuclideBases
-from armi.nuclearDataIO import xsCollections
+
+from armi import context, runLog, utils
+from armi.nucDirectory import elements, nucDir, nuclideBases
from armi.physics.neutronics.fissionProductModel import fissionProductModel
-from armi.reactor import grids
-from armi.reactor import parameters
+from armi.reactor import grids, parameters
from armi.reactor.flags import Flags, TypeSpec
from armi.reactor.parameters import resolveCollections
from armi.utils import densityTools
+from armi.utils import tabulate
from armi.utils import units
from armi.utils.densityTools import calculateNumberDensity
+from armi.utils.flags import auto
class FlagSerializer(parameters.Serializer):
@@ -73,7 +69,7 @@ class FlagSerializer(parameters.Serializer):
@staticmethod
def pack(data):
"""
- Flags are represented as a 2-D numpy array of uint8 (single-byte, unsigned
+ Flags are represented as a 2D numpy array of uint8 (single-byte, unsigned
integers), where each row contains the bytes representing a single Flags
instance. We also store the list of field names so that we can verify that the
reader and the writer can agree on the meaning of each bit.
@@ -92,9 +88,9 @@ def _packImpl(data, flagCls: Type[utils.Flag]):
functionality without having to do unholy things to ARMI's actual set of
``reactor.flags.Flags``.
"""
- npa = numpy.array(
- [b for f in data for b in f.to_bytes()], dtype=numpy.uint8
- ).reshape((len(data), flagCls.width()))
+ npa = np.array([b for f in data for b in f.to_bytes()], dtype=np.uint8).reshape(
+ (len(data), flagCls.width())
+ )
return npa, {"flag_order": flagCls.sortedFields()}
@@ -108,7 +104,6 @@ def _remapBits(inp: int, mapping: Dict[int, int]):
----------
inp : int
input bitfield
-
mapping : dict
dictionary mapping from old bit position -> new bit position
"""
@@ -173,11 +168,13 @@ def _unpackImpl(cls, data, version, attrs, flagCls: Type[utils.Flag]):
# Make sure that all of the old flags still exist
if not flagSetIn.issubset(flagSetNow):
missingFlags = flagSetIn - flagSetNow
- raise ValueError(
- "The set of flags in the database includes unknown flags. "
- "Make sure you are using the correct ARMI app. Missing flags:\n"
- "{}".format(missingFlags)
+ runLog.warning(
+ "The set of flags in the database includes unknown flags. For convenience, we will "
+ f"add these to the system: {missingFlags}"
)
+ flagCls.extend({k: auto() for k in missingFlags})
+
+ flagOrderNow = flagCls.sortedFields()
if all(i == j for i, j in zip(flagOrderPassed, flagOrderNow)):
out = [flagCls.from_bytes(row.tobytes()) for row in data]
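A sketch of the new recovery path, using a throwaway ``Flag`` subclass rather than ARMI's real
reactor flags: unknown fields read back from a database now get appended in place instead of
raising.

```python
from armi.utils.flags import Flag, auto

class DemoFlags(Flag):
    FUEL = auto()
    CLAD = auto()

DemoFlags.extend({"DUCT": auto()})  # mirrors flagCls.extend(...) above
print(DemoFlags.sortedFields())     # now includes "DUCT"
```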
@@ -218,7 +215,7 @@ def _defineBaseParameters():
the DB, it is possible to recover the flags from that.
* Storing flags to the DB may be complicated, since it is easier to imagine a
number of flags that is greater than the width of natively-supported integer
- types, requiring some extra tricks to store the flages in an HDF5 file.
+ types, requiring some extra tricks to store the flags in an HDF5 file.
* Allowing flags to be modified by plugins further complicates things, in that
it is important to ensure that the meaning of all bits in the flag value are
consistent between a database state and the current ARMI environment. This may
@@ -507,7 +504,6 @@ def copyParamsFrom(self, other):
----------
other : ArmiObject
The object to copy params from
-
"""
self.p = other.p.__class__()
for p, val in other.p.items():
@@ -898,17 +894,6 @@ def getMaxArea(self):
"""
raise NotImplementedError()
- def getMaxVolume(self):
- """
- The maximum volume of this object if it were totally full.
-
- Returns
- -------
- vol : float
- volume in cm^3.
- """
- raise NotImplementedError()
-
def getMass(self, nuclideNames=None):
"""
Determine the mass in grams of nuclide(s) and/or elements in this object.
@@ -1299,7 +1284,7 @@ def getNuclideNumberDensities(self, nucNames):
multiplying the number densities within each child Composite by the volume
of the child Composite and dividing by the total volume of the Composite.
"""
- volumes = numpy.array(
+ volumes = np.array(
[
c.getVolume() / (c.parent.getSymmetryFactor() if c.parent else 1.0)
for c in self
@@ -1316,7 +1301,7 @@ def getNuclideNumberDensities(self, nucNames):
densListForEachComp.append(
[numberDensityDict.get(nuc, 0.0) for nuc in nucNames]
)
- nucDensForEachComp = numpy.array(densListForEachComp) # c x n
+ nucDensForEachComp = np.array(densListForEachComp) # c x n
return volumes.dot(nucDensForEachComp) / totalVol
@@ -1362,124 +1347,6 @@ def getNumberDensities(self, expandFissionProducts=False):
return self._expandLFPs(numberDensities)
return numberDensities
- def getNeutronEnergyDepositionConstants(self):
- """
- Get the neutron energy deposition group constants for a composite.
-
- Returns
- -------
- energyDepConstants: numpy.array
- Neutron energy generation group constants (in Joules/cm)
-
- Raises
- ------
- RuntimeError:
- Reports if a cross section library is not assigned to a reactor.
- """
- if not self.r.core.lib:
- raise RuntimeError(
- "Cannot get neutron energy deposition group constants without "
- "a library. Please ensure a library exists."
- )
-
- return xsCollections.computeNeutronEnergyDepositionConstants(
- self.getNumberDensities(), self.r.core.lib, self.getMicroSuffix()
- )
-
- def getGammaEnergyDepositionConstants(self):
- """
- Get the gamma energy deposition group constants for a composite.
-
- Returns
- -------
- energyDepConstants: numpy.array
- Energy generation group constants (in Joules/cm)
-
- Raises
- ------
- RuntimeError:
- Reports if a cross section library is not assigned to a reactor.
- """
- if not self.r.core.lib:
- raise RuntimeError(
- "Cannot get gamma energy deposition group constants without "
- "a library. Please ensure a library exists."
- )
-
- return xsCollections.computeGammaEnergyDepositionConstants(
- self.getNumberDensities(), self.r.core.lib, self.getMicroSuffix()
- )
-
- def getTotalEnergyGenerationConstants(self):
- """
- Get the total energy generation group constants for a composite.
-
- Gives the total energy generation rates when multiplied by the multigroup flux.
-
- Returns
- -------
- totalEnergyGenConstant: numpy.array
- Total (fission + capture) energy generation group constants (Joules/cm)
- """
- return (
- self.getFissionEnergyGenerationConstants()
- + self.getCaptureEnergyGenerationConstants()
- )
-
- def getFissionEnergyGenerationConstants(self):
- """
- Get the fission energy generation group constants for a composite.
-
- Gives the fission energy generation rates when multiplied by the multigroup
- flux.
-
- Returns
- -------
- fissionEnergyGenConstant: numpy.array
- Energy generation group constants (Joules/cm)
-
- Raises
- ------
- RuntimeError:
- Reports if a cross section library is not assigned to a reactor.
- """
- if not self.r.core.lib:
- raise RuntimeError(
- "Cannot compute energy generation group constants without a library"
- ". Please ensure a library exists."
- )
-
- return xsCollections.computeFissionEnergyGenerationConstants(
- self.getNumberDensities(), self.r.core.lib, self.getMicroSuffix()
- )
-
- def getCaptureEnergyGenerationConstants(self):
- """
- Get the capture energy generation group constants for a composite.
-
- Gives the capture energy generation rates when multiplied by the multigroup
- flux.
-
- Returns
- -------
- fissionEnergyGenConstant: numpy.array
- Energy generation group constants (Joules/cm)
-
- Raises
- ------
- RuntimeError:
- Reports if a cross section library is not assigned to a reactor.
- """
- if not self.r.core.lib:
- raise RuntimeError(
- "Cannot compute energy generation group constants without a library"
- ". Please ensure a library exists."
- )
-
- return xsCollections.computeCaptureEnergyGenerationConstants(
- self.getNumberDensities(), self.r.core.lib, self.getMicroSuffix()
- )
-
def _expandLFPs(self, numberDensities):
"""
Expand the LFPs on the numberDensities dictionary using this composite's
@@ -1765,23 +1632,6 @@ def getFissileMassEnrich(self):
else:
return 0.0
- def getBoronMassEnrich(self):
- """Return B-10 mass fraction."""
- b10 = self.getMass("B10")
- b11 = self.getMass("B11")
- total = b11 + b10
- if total == 0.0:
- return 0.0
- return b10 / total
-
- def getUraniumMassEnrich(self):
- """Returns U-235 mass fraction assuming U-235 and U-238 only."""
- u5 = self.getMass("U235")
- if u5 < 1e-10:
- return 0.0
- u8 = self.getMass("U238")
- return u5 / (u8 + u5)
-
def getUraniumNumEnrich(self):
"""Returns U-235 number fraction."""
u8 = self.getNumberDensity("U238")
@@ -1790,20 +1640,6 @@ def getUraniumNumEnrich(self):
u5 = self.getNumberDensity("U235")
return u5 / (u8 + u5)
- def getPuN(self):
- """Returns total number density of Pu isotopes."""
- nucNames = [nuc.name for nuc in elements.byZ[94].nuclides]
- return sum(self.getNuclideNumberDensities(nucNames))
-
- def getPuMoles(self):
- """Returns total number of moles of Pu isotopes."""
- return (
- self.getPuN()
- / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM
- * self.getVolume()
- * self.getSymmetryFactor()
- )
-
def calcTotalParam(
self,
param,
@@ -2066,7 +1902,7 @@ def _minMaxHelper(
def getChildParamValues(self, param):
"""Get the child parameter values in a numpy array."""
- return numpy.array([child.p[param] for child in self])
+ return np.array([child.p[param] for child in self])
def isFuel(self):
"""True if this is a fuel block."""
@@ -2145,48 +1981,6 @@ def getHMDens(self):
hmDens = sum(self.getNuclideNumberDensities(hmNuclides))
return hmDens
- def getPuMass(self):
- """Get the mass of Pu in this object in grams."""
- nucs = []
- for nucName in [nuc.name for nuc in elements.byZ[94].nuclides]:
- nucs.append(nucName)
- pu = self.getMass(nucs)
- return pu
-
- def getPuFrac(self):
- """
- Compute the Pu/HM mass fraction in this object.
-
- Returns
- -------
- puFrac : float
- The pu mass fraction in heavy metal in this assembly
- """
- hm = self.getHMMass()
- pu = self.getPuMass()
- if hm == 0.0:
- return 0.0
- else:
- return pu / hm
-
- def getZrFrac(self):
- """Return the total zr/(hm+zr) fraction in this assembly."""
- hm = self.getHMMass()
- zrNucs = [nuc.name for nuc in elements.bySymbol["ZR"].nuclides]
- zr = self.getMass(zrNucs)
- if hm + zr > 0:
- return zr / (hm + zr)
- else:
- return 0.0
-
- def getMaxUraniumMassEnrich(self):
- maxV = 0
- for child in self:
- v = child.getUraniumMassEnrich()
- if v > maxV:
- maxV = v
- return maxV
-
def getFPMass(self):
"""Returns mass of fission products in this block in grams."""
nucs = []
@@ -2310,7 +2104,7 @@ def getMgFlux(self, adjoint=False, average=False, volume=None, gamma=False):
Returns
-------
- flux : numpy.array
+ flux : np.ndarray
multigroup neutron flux in [n/cm^2/s]
"""
if average:
@@ -2458,7 +2252,7 @@ def getComponentsOfMaterial(self, material=None, materialName=None):
Parameters
----------
- material : Material object, optional
+ material : armi.materials.material.Material, optional
The material to match
materialName : str, optional
The material name to match.
@@ -2597,16 +2391,6 @@ def setComponentDimensionsReport(self):
return reportGroups
- def printDensities(self, expandFissionProducts=False):
- """Get lines that have the number densities of a object."""
- numberDensities = self.getNumberDensities(
- expandFissionProducts=expandFissionProducts
- )
- lines = []
- for nucName, nucDens in numberDensities.items():
- lines.append("{0:6s} {1:.7E}".format(nucName, nucDens))
- return lines
-
def expandAllElementalsToIsotopics(self):
reactorNucs = self.getNuclides()
for elemental in nuclideBases.where(
@@ -2666,7 +2450,7 @@ def getDominantMaterial(self, typeSpec: TypeSpec = None, exact=False):
Returns
-------
- mat : Material
+ mat : armi.materials.material.Material
the first instance of the most dominant material (by volume) in this object.
See Also
@@ -3074,9 +2858,7 @@ def _syncParameters(self, allSyncData, errors):
# out of sync, and this parameter was also globally modified and
# readjusted to the original value.
curVal = self.p[key]
- if isinstance(val, numpy.ndarray) or isinstance(
- curVal, numpy.ndarray
- ):
+ if isinstance(val, np.ndarray) or isinstance(curVal, np.ndarray):
if (val != curVal).any():
errors[self, key].append(nodeRank)
elif curVal != val:
@@ -3210,25 +2992,26 @@ def getIntegratedMgFlux(self, adjoint=False, gamma=False):
----------
adjoint : bool, optional
Return adjoint flux instead of real
-
gamma : bool, optional
Whether to return the neutron flux or the gamma flux.
Returns
-------
- integratedFlux : numpy.array
+ integratedFlux : np.ndarray
multigroup neutron tracklength in [n-cm/s]
"""
- integratedMgFlux = numpy.zeros(1)
-
+ integratedMgFlux = np.zeros(1)
for c in self:
- integratedMgFlux = integratedMgFlux + c.getIntegratedMgFlux(
- adjoint=adjoint, gamma=gamma
- )
+ mgFlux = c.getIntegratedMgFlux(adjoint=adjoint, gamma=gamma)
+ if mgFlux is not None:
+ integratedMgFlux = integratedMgFlux + mgFlux
+
return integratedMgFlux
def _getReactionRates(self, nucName, nDensity=None):
"""
+ Helper to get the reaction rates of a certain nuclide on one ArmiObject.
+
Parameters
----------
nucName : str
@@ -3243,16 +3026,21 @@ def _getReactionRates(self, nucName, nDensity=None):
Notes
-----
- If you set nDensity to 1/CM2_PER_BARN this makes 1 group cross section generation easier
+ Setting nDensity to 1/CM2_PER_BARN simplifies 1-group cross section generation.
+
+ This method is not designed to work on ``Assembly``, ``Core``, or anything higher on the
+ hierarchy than ``Block``.
"""
from armi.reactor.blocks import Block
+ from armi.reactor.reactors import Core
if nDensity is None:
nDensity = self.getNumberDensity(nucName)
+
try:
return getReactionRateDict(
nucName,
- self.getAncestorWithFlags(Flags.CORE).lib,
+ self.getAncestor(lambda c: isinstance(c, Core)).lib,
self.getAncestor(lambda x: isinstance(x, Block)).getMicroSuffix(),
self.getIntegratedMgFlux(),
nDensity,
@@ -3272,7 +3060,7 @@ def _getReactionRates(self, nucName, nDensity=None):
def getReactionRates(self, nucName, nDensity=None):
"""
- Get the reaction rates of a certain nuclide on this object.
+ Get the reaction rates of a certain nuclide on this ArmiObject.
Parameters
----------
@@ -3288,18 +3076,23 @@ def getReactionRates(self, nucName, nDensity=None):
Notes
-----
- This is volume integrated NOT (1/cm3-s)
+ These reaction rates are volume integrated (reactions/s), NOT reaction densities (1/cm3-s).
- If you set nDensity to 1 this makes 1-group cross section generation easier
+ Setting nDensity to 1 simplifies 1-group cross section generation.
"""
- rxnRates = {"nG": 0, "nF": 0, "n2n": 0, "nA": 0, "nP": 0, "n3n": 0}
+ from armi.reactor.components import Component
- # not all composite objects are iterable (i.e. components), so in that
- # case just examine only the object itself
- for armiObject in self.getChildren() or [self]:
- for rxName, val in armiObject._getReactionRates(
- nucName, nDensity=nDensity
- ).items():
+ # find child objects
+ objects = self.getChildren(
+ deep=True, predicate=lambda x: isinstance(x, Component)
+ )
+ if not len(objects):
+ objects = [self]
+
+ # The reaction rates for this object are the sum over its children
+ rxnRates = {"nG": 0, "nF": 0, "n2n": 0, "nA": 0, "nP": 0, "n3n": 0}
+ for armiObject in objects:
+ for rxName, val in armiObject._getReactionRates(nucName, nDensity).items():
rxnRates[rxName] += val
return rxnRates
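
For a quick sense of the new traversal, a minimal usage sketch (hypothetical objects, not part of this patch; `b` is assumed to be a Block with flux data and a loaded cross section library):

# Volume-integrated U-235 reaction rates, summed over b's Components.
rates = b.getReactionRates("U235")                      # uses b's own U-235 density
perUnitDens = b.getReactionRates("U235", nDensity=1.0)  # eases 1-group XS generation
print(rates["nF"], rates["nG"])                         # fission and capture rates (1/s)
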
@@ -3310,10 +3103,6 @@ def printContents(self, includeNuclides=True):
for c in self.getChildren():
c.printContents(includeNuclides=includeNuclides)
- def isOnWhichSymmetryLine(self):
- grid = self.parent.spatialGrid
- return grid.overlapsWhichSymmetryLine(self.spatialLocator.getCompleteIndices())
-
def _genChildByLocationLookupTable(self):
"""Update the childByLocation lookup table."""
runLog.extra("Generating location-to-child lookup table.")
@@ -3466,7 +3255,7 @@ def getReactionRateDict(nucName, lib, xsSuffix, mgFlux, nDens):
xsSuffix : str
cross section suffix, consisting of the type followed by the burnup group, e.g. 'AB' for the
second burnup group of type A
- mgFlux : numpy.nArray
+ mgFlux : np.ndarray
integrated mgFlux (n-cm/s)
nDens : float
number density (atom/bn-cm)
diff --git a/armi/reactor/converters/axialExpansionChanger.py b/armi/reactor/converters/axialExpansionChanger.py
deleted file mode 100644
index a9eda0989..000000000
--- a/armi/reactor/converters/axialExpansionChanger.py
+++ /dev/null
@@ -1,909 +0,0 @@
-# Copyright 2019 TerraPower, LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Enable component-wise axial expansion for assemblies and/or a reactor."""
-
-from statistics import mean
-from typing import List
-
-from armi import runLog
-from armi.materials import material
-from armi.reactor.components import UnshapedComponent
-from armi.reactor.flags import Flags
-from numpy import array
-
-TARGET_FLAGS_IN_PREFERRED_ORDER = [
- Flags.FUEL,
- Flags.CONTROL,
- Flags.POISON,
- Flags.SHIELD,
- Flags.SLUG,
-]
-
-
-def getDefaultReferenceAssem(assems):
- """Return a default reference assembly."""
- # if assemblies are defined in blueprints, handle meshing
- # assume finest mesh is reference
- assemsByNumBlocks = sorted(
- assems,
- key=lambda a: len(a),
- reverse=True,
- )
- return assemsByNumBlocks[0] if assemsByNumBlocks else None
-
-
-def makeAssemsAbleToSnapToUniformMesh(
- assems, nonUniformAssemFlags, referenceAssembly=None
-):
- """Make this set of assemblies aware of the reference mesh so they can stay uniform as they axially expand."""
- if not referenceAssembly:
- referenceAssembly = getDefaultReferenceAssem(assems)
- # make the snap lists so assems know how to expand
- nonUniformAssems = [Flags.fromStringIgnoreErrors(t) for t in nonUniformAssemFlags]
- for a in assems:
- if any(a.hasFlags(f) for f in nonUniformAssems):
- continue
- a.makeAxialSnapList(referenceAssembly)
-
-
-def expandColdDimsToHot(
- assems: list,
- isDetailedAxialExpansion: bool,
- referenceAssembly=None,
-):
- """
- Expand BOL assemblies, resolve disjoint axial mesh (if needed), and update block BOL heights.
-
- .. impl:: Perform expansion during core construction based on block heights at a specified temperature.
- :id: I_ARMI_INP_COLD_HEIGHT
- :implements: R_ARMI_INP_COLD_HEIGHT
-
- This method is designed to be used during core construction to axially thermally expand the
- assemblies to their "hot" temperatures (as determined by ``Thot`` values in blueprints).
- First, The Assembly is prepared for axial expansion via ``setAssembly``. In
- ``applyColdHeightMassIncrease``, the number densities on each Component is adjusted to
- reflect that Assembly inputs are at cold (i.e., ``Tinput``) temperatures. To expand to
- the requested hot temperatures, thermal expansion factors are then computed in
- ``computeThermalExpansionFactors``. Finally, the Assembly is axially thermally expanded in
- ``axiallyExpandAssembly``.
-
- If the setting ``detailedAxialExpansion`` is ``False``, then each Assembly gets its Block mesh
- set to match that of the "reference" Assembly (see ``getDefaultReferenceAssem`` and ``setBlockMesh``).
-
- Once the Assemblies are axially expanded, the Block BOL heights are updated. To account for the change in
- Block volume from axial expansion, ``completeInitialLoading`` is called to update any volume-dependent
- Block information.
-
- Parameters
- ----------
- assems: list[:py:class:`Assembly `]
- list of assemblies to be thermally expanded
- isDetailedAxialExpansion: bool
- If False, assemblies will be forced to conform to the reference mesh after expansion
- referenceAssembly: :py:class:`Assembly `, optional
- Assembly whose mesh other meshes will conform to if isDetailedAxialExpansion is False.
- If not provided, will assume the finest mesh assembly which is typically fuel.
-
- Notes
- -----
- Calling this method will result in an increase in mass via applyColdHeightMassIncrease!
-
- See Also
- --------
- :py:meth:`armi.reactor.converters.axialExpansionChanger.AxialExpansionChanger.applyColdHeightMassIncrease`
- """
- assems = list(assems)
- if not referenceAssembly:
- referenceAssembly = getDefaultReferenceAssem(assems)
- axialExpChanger = AxialExpansionChanger(isDetailedAxialExpansion)
- for a in assems:
- axialExpChanger.setAssembly(a, expandFromTinputToThot=True)
- axialExpChanger.applyColdHeightMassIncrease()
- axialExpChanger.expansionData.computeThermalExpansionFactors()
- axialExpChanger.axiallyExpandAssembly()
- if not isDetailedAxialExpansion:
- for a in assems:
- a.setBlockMesh(referenceAssembly.getAxialMesh())
- # update block BOL heights to reflect hot heights
- for a in assems:
- for b in a:
- b.p.heightBOL = b.getHeight()
- b.completeInitialLoading()
-
-
-class AxialExpansionChanger:
- """
- Axially expand or contract assemblies or an entire core.
-
- Attributes
- ----------
- linked : :py:class:`AssemblyAxialLinkage`
- establishes object containing axial linkage information
- expansionData : :py:class:`ExpansionData `
- establishes object to store and access relevant expansion data
-
- Notes
- -----
- - Is designed to work with general, vertically oriented, pin-type assembly designs. It is not set up to account
- for any other assembly type.
- - Useful for fuel performance, thermal expansion, reactivity coefficients, etc.
- """
-
- def __init__(self, detailedAxialExpansion: bool = False):
- """
- Build an axial expansion converter.
-
- Parameters
- ----------
- detailedAxialExpansion : bool, optional
- A boolean to indicate whether or not detailedAxialExpansion is to be utilized.
- """
- self._detailedAxialExpansion = detailedAxialExpansion
- self.linked = None
- self.expansionData = None
-
- def performPrescribedAxialExpansion(
- self, a, componentLst: list, percents: list, setFuel=True
- ):
- """Perform axial expansion/contraction of an assembly given prescribed expansion percentages.
-
- .. impl:: Perform expansion/contraction, given a list of components and expansion coefficients.
- :id: I_ARMI_AXIAL_EXP_PRESC
- :implements: R_ARMI_AXIAL_EXP_PRESC
-
- This method performs component-wise axial expansion for an Assembly given expansion coefficients
- and a corresponding list of Components. In ``setAssembly``, the Assembly is prepared
- for axial expansion by determining Component-wise axial linkage and checking to see if a dummy Block
- is in place (necessary for ensuring conservation properties). The provided expansion factors are
- then assigned to their corresponding Components in ``setExpansionFactors``. Finally, the axial
- expansion is performed in ``axiallyExpandAssembly``
-
- Parameters
- ----------
- a : :py:class:`Assembly `
- ARMI assembly to be changed
- componentLst : list[:py:class:`Component `]
- list of Components to be expanded
- percents : list[float]
- list of expansion percentages for each component listed in componentList
- setFuel : boolean, optional
- Boolean to determine whether or not fuel blocks should have their target components set
- This is useful when target components within a fuel block need to be determined on-the-fly.
-
- Notes
- -----
- - percents may be positive (expansion) or negative (contraction)
- """
- self.setAssembly(a, setFuel)
- self.expansionData.setExpansionFactors(componentLst, percents)
- self.axiallyExpandAssembly()
-
- def performThermalAxialExpansion(
- self,
- a,
- tempGrid: list,
- tempField: list,
- setFuel: bool = True,
- expandFromTinputToThot: bool = False,
- ):
- """Perform thermal expansion/contraction for an assembly given an axial temperature grid and
- field.
-
- .. impl:: Perform thermal expansion/contraction, given an axial temperature distribution
- over an assembly.
- :id: I_ARMI_AXIAL_EXP_THERM
- :implements: R_ARMI_AXIAL_EXP_THERM
-
- This method performs component-wise thermal expansion for an assembly given a discrete
- temperature distribution over the axial length of the Assembly. In ``setAssembly``, the
- Assembly is prepared for axial expansion by determining Component-wise axial linkage and
- checking to see if a dummy Block is in place (necessary for ensuring conservation
- properties). The discrete temperature distribution is then leveraged to update Component
- temperatures and compute thermal expansion factors (via
- ``updateComponentTempsBy1DTempField`` and ``computeThermalExpansionFactors``,
- respectively). Finally, the axial expansion is performed in ``axiallyExpandAssembly``.
-
- Parameters
- ----------
- a : :py:class:`Assembly `
- ARMI assembly to be changed
- tempGrid : float, list
- Axial temperature grid (in cm) (i.e., physical locations where temp is stored)
- tempField : float, list
- Temperature values (in C) along grid
- setFuel : boolean, optional
- Boolean to determine whether or not fuel blocks should have their target components set
- This is useful when target components within a fuel block need to be determined on-the-fly.
- expandFromTinputToThot: bool
- determines if thermal expansion factors should be calculated from c.inputTemperatureInC
- to c.temperatureInC (True) or some other reference temperature and c.temperatureInC (False)
- """
- self.setAssembly(a, setFuel, expandFromTinputToThot)
- self.expansionData.updateComponentTempsBy1DTempField(tempGrid, tempField)
- self.expansionData.computeThermalExpansionFactors()
- self.axiallyExpandAssembly()
-
- def reset(self):
- self.linked = None
- self.expansionData = None
-
- def setAssembly(self, a, setFuel=True, expandFromTinputToThot=False):
- """Set the armi assembly to be changed and init expansion data class for assembly.
-
- Parameters
- ----------
- a : :py:class:`Assembly `
- ARMI assembly to be changed
- setFuel : boolean, optional
- Boolean to determine whether or not fuel blocks should have their target components set
- This is useful when target components within a fuel block need to be determined on-the-fly.
- expandFromTinputToThot: bool
- determines if thermal expansion factors should be calculated from c.inputTemperatureInC
- to c.temperatureInC (True) or some other reference temperature and c.temperatureInC (False)
-
- Notes
- -----
- When considering thermal expansion, if there is an axial temperature distribution on the
- assembly, the axial expansion methodology will NOT perfectly preseve mass. The magnitude of
- the gradient of the temperature distribution is the primary factor in determining the
- cumulative loss of mass conservation. Additional details will be documented in
- :ref:`axialExpansion` of the documentation.
- """
- self.linked = AssemblyAxialLinkage(a)
- self.expansionData = ExpansionData(
- a, setFuel=setFuel, expandFromTinputToThot=expandFromTinputToThot
- )
- self._isTopDummyBlockPresent()
-
- def applyColdHeightMassIncrease(self):
- """
- Increase component mass because they are declared at cold dims.
-
- Notes
- -----
- A cold 1 cm tall component will have more mass that a component with the
- same mass/length as a component with a hot height of 1 cm. This should be
- called when the setting `inputHeightsConsideredHot` is used. This adjusts
- the expansion factor applied during applyMaterialMassFracsToNumberDensities.
- """
- for c in self.linked.a.getComponents():
- axialExpansionFactor = 1.0 + c.material.linearExpansionFactor(
- c.temperatureInC, c.inputTemperatureInC
- )
- c.changeNDensByFactor(axialExpansionFactor)
-
- def _isTopDummyBlockPresent(self):
- """Determines if top most block of assembly is a dummy block.
-
- Notes
- -----
- - If true, then axial expansion will be physical for all blocks.
- - If false, the top most block in the assembly is artificially chopped
- to preserve the assembly height. A runLog.Warning also issued.
- """
- blkLst = self.linked.a.getBlocks()
- if not blkLst[-1].hasFlags(Flags.DUMMY):
- runLog.warning(
- f"No dummy block present at the top of {self.linked.a}! "
- "Top most block will be artificially chopped "
- "to preserve assembly height"
- )
- if self._detailedAxialExpansion:
- msg = "Cannot run detailedAxialExpansion without a dummy block at the top of the assembly!"
- runLog.error(msg)
- raise RuntimeError(msg)
-
- def axiallyExpandAssembly(self):
- """Utilizes assembly linkage to do axial expansion.
-
- .. impl:: Preserve the total height of an ARMI assembly, during expansion.
- :id: I_ARMI_ASSEM_HEIGHT_PRES
- :implements: R_ARMI_ASSEM_HEIGHT_PRES
-
- The total height of an Assembly is preserved by not changing the ``ztop`` position
- of the top-most Block in an Assembly. The ``zbottom`` of the top-most Block is
- adjusted to match the Block immediately below it. The ``height`` of the
- top-most Block is is then updated to reflect any expansion/contraction.
- """
- mesh = [0.0]
- numOfBlocks = self.linked.a.countBlocksWithFlags()
- runLog.debug(
- "Printing component expansion information (growth percentage and 'target component')"
- f"for each block in assembly {self.linked.a}."
- )
- for ib, b in enumerate(self.linked.a):
- runLog.debug(msg=f" Block {b}")
- blockHeight = b.getHeight()
- # set bottom of block equal to top of block below it
- # if ib == 0, leave block bottom = 0.0
- if ib > 0:
- b.p.zbottom = self.linked.linkedBlocks[b][0].p.ztop
- isDummyBlock = ib == (numOfBlocks - 1)
- if not isDummyBlock:
- for c in getSolidComponents(b):
- growFrac = self.expansionData.getExpansionFactor(c)
- runLog.debug(msg=f" Component {c}, growFrac = {growFrac:.4e}")
- c.height = growFrac * blockHeight
- # align linked components
- if ib == 0:
- c.zbottom = 0.0
- else:
- if self.linked.linkedComponents[c][0] is not None:
- # use linked components below
- c.zbottom = self.linked.linkedComponents[c][0].ztop
- else:
- # otherwise there aren't any linked components
- # so just set the bottom of the component to
- # the top of the block below it
- c.zbottom = self.linked.linkedBlocks[b][0].p.ztop
- c.ztop = c.zbottom + c.height
- # update component number densities
- newNumberDensities = {
- nuc: c.getNumberDensity(nuc) / growFrac
- for nuc in c.getNuclides()
- }
- c.setNumberDensities(newNumberDensities)
- # redistribute block boundaries if on the target component
- if self.expansionData.isTargetComponent(c):
- b.p.ztop = c.ztop
- b.p.height = b.p.ztop - b.p.zbottom
- else:
- b.p.height = b.p.ztop - b.p.zbottom
-
- b.p.z = b.p.zbottom + b.getHeight() / 2.0
-
- _checkBlockHeight(b)
- # Call Component.clearCache to update the Component volume, and therefore the masses,
- # of all solid components.
- for c in getSolidComponents(b):
- c.clearCache()
- # redo mesh -- functionality based on assembly.calculateZCoords()
- mesh.append(b.p.ztop)
- b.spatialLocator = self.linked.a.spatialGrid[0, 0, ib]
-
- bounds = list(self.linked.a.spatialGrid._bounds)
- bounds[2] = array(mesh)
- self.linked.a.spatialGrid._bounds = tuple(bounds)
-
- def manageCoreMesh(self, r):
- """Manage core mesh post assembly-level expansion.
-
- Parameters
- ----------
- r : :py:class:`Reactor `
- ARMI reactor to have mesh modified
-
- Notes
- -----
- - if no detailedAxialExpansion, then do "cheap" approach to uniformMesh converter.
- - update average core mesh values with call to r.core.updateAxialMesh()
- - oldMesh will be None during initial core construction at processLoading as it has not yet
- been set.
- """
- if not self._detailedAxialExpansion:
- # loop through again now that the reference is adjusted and adjust the non-fuel assemblies.
- for a in r.core.getAssemblies():
- a.setBlockMesh(r.core.refAssem.getAxialMesh())
-
- oldMesh = r.core.p.axialMesh
- r.core.updateAxialMesh()
- if oldMesh:
- runLog.extra("Updated r.core.p.axialMesh (old, new)")
- for old, new in zip(oldMesh, r.core.p.axialMesh):
- runLog.extra(f"{old:.6e}\t{new:.6e}")
-
-
-def getSolidComponents(b):
- """
- Return list of components in the block that have solid material.
-
- Notes
- -----
- Axial expansion only needs to be applied to solid materials. We should not update
- number densities on fluid materials to account for changes in block height.
- """
- return [c for c in b if not isinstance(c.material, material.Fluid)]
-
-
-def _checkBlockHeight(b):
- """
- Do some basic block height validation.
-
- Notes
- -----
- 3cm is a presumptive lower threshhold for DIF3D
- """
- if b.getHeight() < 3.0:
- runLog.debug(
- "Block {0:s} ({1:s}) has a height less than 3.0 cm. ({2:.12e})".format(
- b.name, str(b.p.flags), b.getHeight()
- )
- )
-
- if b.getHeight() < 0.0:
- raise ArithmeticError(
- "Block {0:s} ({1:s}) has a negative height! ({2:.12e})".format(
- b.name, str(b.p.flags), b.getHeight()
- )
- )
-
-
-class AssemblyAxialLinkage:
- """Determines and stores the block- and component-wise axial linkage for an assembly.
-
- Attributes
- ----------
- a : :py:class:`Assembly `
- reference to original assembly; is directly modified/changed during expansion.
-
- linkedBlocks : dict
- - keys = :py:class:`Block `
- - values = list of axially linked blocks; index 0 = lower linked block; index 1: upper
- linked block.
-
- linkedComponents : dict
- - keys = :py:class:`Component `
- - values = list of axially linked components; index 0 = lower linked component;
- index 1: upper linked component.
-
- See Also
- --------
- - self._getLinkedComponents
- - self._getLinkedBlocks()
- """
-
- def __init__(self, StdAssem):
- self.a = StdAssem
- self.linkedBlocks = {}
- self.linkedComponents = {}
- self._determineAxialLinkage()
-
- def _determineAxialLinkage(self):
- """Gets the block and component based linkage."""
- for b in self.a:
- self._getLinkedBlocks(b)
- for c in getSolidComponents(b):
- self._getLinkedComponents(b, c)
-
- def _getLinkedBlocks(self, b):
- """Retrieve the axial linkage for block b.
-
- Parameters
- ----------
- b : :py:class:`Block `
- block to determine axial linkage for
-
- Notes
- -----
- - block linkage is determined by matching ztop/zbottom (see below)
- - block linkage is stored in self.linkedBlocks[b]
- _ _
- | |
- | 2 | Block 2 is linked to block 1.
- |_ _|
- | |
- | 1 | Block 1 is linked to both block 0 and 1.
- |_ _|
- | |
- | 0 | Block 0 is linked to block 1.
- |_ _|
- """
- lowerLinkedBlock = None
- upperLinkedBlock = None
- block_list = self.a.getChildren()
- for otherBlk in block_list:
- if b.name != otherBlk.name:
- if b.p.zbottom == otherBlk.p.ztop:
- lowerLinkedBlock = otherBlk
- elif b.p.ztop == otherBlk.p.zbottom:
- upperLinkedBlock = otherBlk
-
- self.linkedBlocks[b] = [lowerLinkedBlock, upperLinkedBlock]
-
- if lowerLinkedBlock is None:
- runLog.debug(
- "Assembly {0:22s} at location {1:22s}, Block {2:22s}"
- "is not linked to a block below!".format(
- str(self.a.getName()),
- str(self.a.getLocation()),
- str(b.p.flags),
- ),
- single=True,
- )
- if upperLinkedBlock is None:
- runLog.debug(
- "Assembly {0:22s} at location {1:22s}, Block {2:22s}"
- "is not linked to a block above!".format(
- str(self.a.getName()),
- str(self.a.getLocation()),
- str(b.p.flags),
- ),
- single=True,
- )
-
- def _getLinkedComponents(self, b, c):
- """Retrieve the axial linkage for component c.
-
- Parameters
- ----------
- b : :py:class:`Block `
- key to access blocks containing linked components
- c : :py:class:`Component `
- component to determine axial linkage for
-
- Raises
- ------
- RuntimeError
- multiple candidate components are found to be axially linked to a component
- """
- lstLinkedC = [None, None]
- for ib, linkdBlk in enumerate(self.linkedBlocks[b]):
- if linkdBlk is not None:
- for otherC in getSolidComponents(linkdBlk.getChildren()):
- if _determineLinked(c, otherC):
- if lstLinkedC[ib] is not None:
- errMsg = (
- "Multiple component axial linkages have been found for "
- f"Component {c}; Block {b}; Assembly {b.parent}."
- " This is indicative of an error in the blueprints! Linked "
- f"components found are {lstLinkedC[ib]} and {otherC}"
- )
- runLog.error(msg=errMsg)
- raise RuntimeError(errMsg)
- lstLinkedC[ib] = otherC
-
- self.linkedComponents[c] = lstLinkedC
-
- if lstLinkedC[0] is None:
- runLog.debug(
- f"Assembly {self.a}, Block {b}, Component {c} has nothing linked below it!",
- single=True,
- )
- if lstLinkedC[1] is None:
- runLog.debug(
- f"Assembly {self.a}, Block {b}, Component {c} has nothing linked above it!",
- single=True,
- )
-
-
-def _determineLinked(componentA, componentB):
- """Determine axial component linkage for two components.
-
- Parameters
- ----------
- componentA : :py:class:`Component `
- component of interest
- componentB : :py:class:`Component `
- component to compare and see if is linked to componentA
-
- Notes
- -----
- - Requires that shapes have the getCircleInnerDiameter and getBoundingCircleOuterDiameter
- defined
- - For axial linkage to be True, components MUST be solids, the same Component Class,
- multiplicity, and meet inner and outer diameter requirements.
- - When component dimensions are retrieved, cold=True to ensure that dimensions are evaluated
- at cold/input temperatures. At temperature, solid-solid interfaces in ARMI may produce
- slight overlaps due to thermal expansion. Handling these potential overlaps are out of scope.
-
- Returns
- -------
- linked : bool
- status is componentA and componentB are axially linked to one another
- """
- if (
- (componentA.containsSolidMaterial() and componentB.containsSolidMaterial())
- and isinstance(componentA, type(componentB))
- and (componentA.getDimension("mult") == componentB.getDimension("mult"))
- ):
- if isinstance(componentA, UnshapedComponent):
- runLog.warning(
- f"Components {componentA} and {componentB} are UnshapedComponents "
- "and do not have 'getCircleInnerDiameter' or getBoundingCircleOuterDiameter "
- "methods; nor is it physical to do so. Instead of crashing and raising an error, "
- "they are going to be assumed to not be linked.",
- single=True,
- )
- linked = False
- else:
- idA, odA = (
- componentA.getCircleInnerDiameter(cold=True),
- componentA.getBoundingCircleOuterDiameter(cold=True),
- )
- idB, odB = (
- componentB.getCircleInnerDiameter(cold=True),
- componentB.getBoundingCircleOuterDiameter(cold=True),
- )
-
- biggerID = max(idA, idB)
- smallerOD = min(odA, odB)
- if biggerID >= smallerOD:
- # one object fits inside the other
- linked = False
- else:
- linked = True
-
- else:
- linked = False
-
- return linked
-
-
-class ExpansionData:
- """Object containing data needed for axial expansion."""
-
- def __init__(self, a, setFuel: bool, expandFromTinputToThot: bool):
- """
- Parameters
- ----------
- a: :py:class:`Assembly `
- Assembly to assign component-wise expansion data to
- setFuel: bool
- used to determine if fuel component should be set as
- axial expansion target component during initialization.
- see self._isFuelLocked
- expandFromTinputToThot: bool
- determines if thermal expansion factors should be calculated
- from c.inputTemperatureInC to c.temperatureInC (True) or some other
- reference temperature and c.temperatureInC (False)
- """
- self._a = a
- self.componentReferenceTemperature = {}
- self._expansionFactors = {}
- self._componentDeterminesBlockHeight = {}
- self._setTargetComponents(setFuel)
- self.expandFromTinputToThot = expandFromTinputToThot
-
- def setExpansionFactors(self, componentLst: List, expFrac: List):
- """Sets user defined expansion fractions.
-
- Parameters
- ----------
- componentLst : List[:py:class:`Component `]
- list of Components to have their heights changed
- expFrac : List[float]
- list of L1/L0 height changes that are to be applied to componentLst
-
- Raises
- ------
- RuntimeError
- If componentLst and expFrac are different lengths
- """
- if len(componentLst) != len(expFrac):
- runLog.error(
- "Number of components and expansion fractions must be the same!\n"
- f" len(componentLst) = {len(componentLst)}\n"
- f" len(expFrac) = {len(expFrac)}"
- )
- raise RuntimeError
- if 0.0 in expFrac:
- msg = (
- "An expansion fraction, L1/L0, equal to 0.0, is not physical. Expansion fractions "
- "should be greater than 0.0."
- )
- runLog.error(msg)
- raise RuntimeError(msg)
- for exp in expFrac:
- if exp < 0.0:
- msg = (
- "A negative expansion fraction, L1/L0, is not physical. Expansion fractions "
- "should be greater than 0.0."
- )
- runLog.error(msg)
- raise RuntimeError(msg)
- for c, p in zip(componentLst, expFrac):
- self._expansionFactors[c] = p
-
- def updateComponentTempsBy1DTempField(self, tempGrid, tempField):
- """Assign a block-average axial temperature to components.
-
- Parameters
- ----------
- tempGrid : numpy array
- 1D axial temperature grid (i.e., physical locations where temp is stored)
- tempField : numpy array
- temperature values along grid
-
- Notes
- -----
- - given a 1D axial temperature grid and distribution, searches for temperatures that fall
- within the bounds of a block, and averages them
- - this average temperature is then passed to self.updateComponentTemp()
-
- Raises
- ------
- ValueError
- if no temperature points found within a block
- RuntimeError
- if tempGrid and tempField are different lengths
- """
- if len(tempGrid) != len(tempField):
- runLog.error("tempGrid and tempField must have the same length.")
- raise RuntimeError
-
- self.componentReferenceTemperature = {} # reset, just to be safe
- for b in self._a:
- tmpMapping = []
- for idz, z in enumerate(tempGrid):
- if b.p.zbottom <= z <= b.p.ztop:
- tmpMapping.append(tempField[idz])
- if z > b.p.ztop:
- break
-
- if len(tmpMapping) == 0:
- raise ValueError(
- f"{b} has no temperature points within it!"
- "Likely need to increase the refinement of the temperature grid."
- )
-
- blockAveTemp = mean(tmpMapping)
- for c in b:
- self.updateComponentTemp(c, blockAveTemp)
-
- def updateComponentTemp(self, c, temp: float):
- """Update component temperatures with a provided temperature.
-
- Parameters
- ----------
- c : :py:class:`Component `
- component to which the temperature, temp, is to be applied
- temp : float
- new component temperature in C
-
- Notes
- -----
- - "reference" height and temperature are the current states; i.e. before
- 1) the new temperature, temp, is applied to the component, and
- 2) the component is axially expanded
- """
- self.componentReferenceTemperature[c] = c.temperatureInC
- c.setTemperature(temp)
-
- def computeThermalExpansionFactors(self):
- """Computes expansion factors for all components via thermal expansion."""
- for b in self._a:
- for c in getSolidComponents(b):
- if self.expandFromTinputToThot:
- # get thermal expansion factor between c.inputTemperatureInC & c.temperatureInC
- self._expansionFactors[c] = c.getThermalExpansionFactor()
- elif c in self.componentReferenceTemperature:
- growFrac = c.getThermalExpansionFactor(
- T0=self.componentReferenceTemperature[c]
- )
- self._expansionFactors[c] = growFrac
- else:
- # We want expansion factors relative to componentReferenceTemperature not
- # Tinput. But for this component there isn't a componentReferenceTemperature, so
- # we'll assume that the expansion factor is 1.0.
- self._expansionFactors[c] = 1.0
-
- def getExpansionFactor(self, c):
- """Retrieves expansion factor for c.
-
- Parameters
- ----------
- c : :py:class:`Component `
- Component to retrive expansion factor for
- """
- value = self._expansionFactors.get(c, 1.0)
- return value
-
- def _setTargetComponents(self, setFuel):
- """Sets target component for each block.
-
- Parameters
- ----------
- setFuel : bool
- boolean to determine if fuel block should have its target component set. Useful for when
- target components should be determined on the fly.
- """
- for b in self._a:
- if b.p.axialExpTargetComponent:
- self._componentDeterminesBlockHeight[
- b.getComponentByName(b.p.axialExpTargetComponent)
- ] = True
- elif b.hasFlags(Flags.PLENUM) or b.hasFlags(Flags.ACLP):
- self.determineTargetComponent(b, Flags.CLAD)
- elif b.hasFlags(Flags.DUMMY):
- self.determineTargetComponent(b, Flags.COOLANT)
- elif setFuel and b.hasFlags(Flags.FUEL):
- self._isFuelLocked(b)
- else:
- self.determineTargetComponent(b)
-
- def determineTargetComponent(self, b, flagOfInterest=None):
- """Determines target component, stores it on the block, and appends it to
- self._componentDeterminesBlockHeight.
-
- Parameters
- ----------
- b : :py:class:`Block `
- block to specify target component for
- flagOfInterest : :py:class:`Flags `
- the flag of interest to identify the target component
-
- Notes
- -----
- - if flagOfInterest is None, finds the component within b that contains flags that
- are defined in a preferred order of flags, or barring that, in b.p.flags
- - if flagOfInterest is not None, finds the component that contains the flagOfInterest.
-
- Raises
- ------
- RuntimeError
- no target component found
- RuntimeError
- multiple target components found
- """
- if flagOfInterest is None:
- # Follow expansion of most neutronically important component, fuel then control/poison
- for targetFlag in TARGET_FLAGS_IN_PREFERRED_ORDER:
- componentWFlag = [c for c in b.getChildren() if c.hasFlags(targetFlag)]
- if componentWFlag != []:
- break
- # some blocks/components are not included in the above list but should still be found
- if not componentWFlag:
- componentWFlag = [c for c in b.getChildren() if c.p.flags in b.p.flags]
- else:
- componentWFlag = [c for c in b.getChildren() if c.hasFlags(flagOfInterest)]
- if len(componentWFlag) == 0:
- # if only 1 solid, be smart enought to snag it
- solidMaterials = list(
- c for c in b if not isinstance(c.material, material.Fluid)
- )
- if len(solidMaterials) == 1:
- componentWFlag = solidMaterials
- if len(componentWFlag) == 0:
- raise RuntimeError(f"No target component found!\n Block {b}")
- if len(componentWFlag) > 1:
- raise RuntimeError(
- "Cannot have more than one component within a block that has the target flag!"
- f"Block {b}\nflagOfInterest {flagOfInterest}\nComponents {componentWFlag}"
- )
- self._componentDeterminesBlockHeight[componentWFlag[0]] = True
- b.p.axialExpTargetComponent = componentWFlag[0].name
-
- def _isFuelLocked(self, b):
- """Physical/realistic implementation reserved for ARMI plugin.
-
- Parameters
- ----------
- b : :py:class:`Block `
- block to specify target component for
-
- Raises
- ------
- RuntimeError
- multiple fuel components found within b
-
- Notes
- -----
- - This serves as an example to check for fuel/clad locking/interaction found in SFRs.
- - A more realistic/physical implementation is reserved for ARMI plugin(s).
- """
- c = b.getComponent(Flags.FUEL)
- if c is None:
- raise RuntimeError(f"No fuel component within {b}!")
- self._componentDeterminesBlockHeight[c] = True
- b.p.axialExpTargetComponent = c.name
-
- def isTargetComponent(self, c):
- """Returns bool if c is a target component.
-
- Parameters
- ----------
- c : :py:class:`Component `
- Component to check target component status
- """
- return bool(c in self._componentDeterminesBlockHeight)
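
The block-average temperature mapping described in ``updateComponentTempsBy1DTempField`` above (preserved in the relocated ``expansionData`` module) can be illustrated standalone; the block bounds and grid values here are made up:

from statistics import mean

# Temperature points falling within a block's [zbottom, ztop] are averaged.
zbottom, ztop = 25.0, 50.0                       # cm
tempGrid = [0.0, 20.0, 30.0, 40.0, 60.0]         # cm
tempField = [400.0, 420.0, 450.0, 480.0, 520.0]  # C
inBlock = [t for z, t in zip(tempGrid, tempField) if zbottom <= z <= ztop]
blockAveTemp = mean(inBlock)                     # (450.0 + 480.0) / 2 = 465.0
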
diff --git a/armi/reactor/converters/axialExpansionChanger/__init__.py b/armi/reactor/converters/axialExpansionChanger/__init__.py
new file mode 100644
index 000000000..1eecb064e
--- /dev/null
+++ b/armi/reactor/converters/axialExpansionChanger/__init__.py
@@ -0,0 +1,29 @@
+# Copyright 2024 TerraPower, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Enable component-wise axial expansion for assemblies and/or a reactor."""
+
+# ruff: noqa: F401
+from armi.reactor.converters.axialExpansionChanger.assemblyAxialLinkage import (
+ AssemblyAxialLinkage,
+)
+from armi.reactor.converters.axialExpansionChanger.axialExpansionChanger import (
+ AxialExpansionChanger,
+)
+from armi.reactor.converters.axialExpansionChanger.axialExpansionChanger import (
+ makeAssemsAbleToSnapToUniformMesh,
+)
+from armi.reactor.converters.axialExpansionChanger.expansionData import ExpansionData
+from armi.reactor.converters.axialExpansionChanger.expansionData import (
+ getSolidComponents,
+)
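
Because the old module is now a package, the re-exports above keep existing import paths working; a minimal check (assuming an installed ARMI checkout):

# Pre-refactor import paths still resolve via the package __init__.
from armi.reactor.converters.axialExpansionChanger import (
    AssemblyAxialLinkage,
    AxialExpansionChanger,
    ExpansionData,
    getSolidComponents,
)

changer = AxialExpansionChanger(detailedAxialExpansion=False)
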
diff --git a/armi/reactor/converters/axialExpansionChanger/assemblyAxialLinkage.py b/armi/reactor/converters/axialExpansionChanger/assemblyAxialLinkage.py
new file mode 100644
index 000000000..c04b1281e
--- /dev/null
+++ b/armi/reactor/converters/axialExpansionChanger/assemblyAxialLinkage.py
@@ -0,0 +1,213 @@
+# Copyright 2024 TerraPower, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from armi import runLog
+from armi.reactor.components import UnshapedComponent
+from armi.reactor.converters.axialExpansionChanger.expansionData import (
+ getSolidComponents,
+)
+
+
+def _determineLinked(componentA, componentB):
+ """Determine axial component linkage for two components.
+
+ Parameters
+ ----------
+ componentA : :py:class:`Component `
+ component of interest
+ componentB : :py:class:`Component `
+ component to compare and see if is linked to componentA
+
+ Notes
+ -----
+ - Requires that shapes have the getCircleInnerDiameter and getBoundingCircleOuterDiameter
+ methods defined
+ - For axial linkage to be True, components MUST be solids of the same Component class
+ and multiplicity, and meet inner and outer diameter requirements.
+ - When component dimensions are retrieved, cold=True to ensure that dimensions are evaluated
+ at cold/input temperatures. At temperature, solid-solid interfaces in ARMI may produce
+ slight overlaps due to thermal expansion. Handling these potential overlaps is out of scope.
+
+ Returns
+ -------
+ linked : bool
+ True if componentA and componentB are axially linked to one another
+ """
+ if (
+ (componentA.containsSolidMaterial() and componentB.containsSolidMaterial())
+ and isinstance(componentA, type(componentB))
+ and (componentA.getDimension("mult") == componentB.getDimension("mult"))
+ ):
+ if isinstance(componentA, UnshapedComponent):
+ runLog.warning(
+ f"Components {componentA} and {componentB} are UnshapedComponents "
+ "and do not have 'getCircleInnerDiameter' or getBoundingCircleOuterDiameter "
+ "methods; nor is it physical to do so. Instead of crashing and raising an error, "
+ "they are going to be assumed to not be linked.",
+ single=True,
+ )
+ linked = False
+ else:
+ idA, odA = (
+ componentA.getCircleInnerDiameter(cold=True),
+ componentA.getBoundingCircleOuterDiameter(cold=True),
+ )
+ idB, odB = (
+ componentB.getCircleInnerDiameter(cold=True),
+ componentB.getBoundingCircleOuterDiameter(cold=True),
+ )
+
+ biggerID = max(idA, idB)
+ smallerOD = min(odA, odB)
+ if biggerID >= smallerOD:
+ # one object fits inside the other
+ linked = False
+ else:
+ linked = True
+
+ else:
+ linked = False
+
+ return linked
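
The diameter test at the end of ``_determineLinked`` reduces to a simple interval check; an illustrative sketch with made-up cold dimensions (cm):

# Two stacked solid pins: neither fits inside the other, so they are linked.
idA, odA = 0.0, 1.0   # inner/outer diameter of the component below
idB, odB = 0.0, 1.0   # inner/outer diameter of the component above
biggerID = max(idA, idB)       # 0.0
smallerOD = min(odA, odB)      # 1.0
linked = biggerID < smallerOD  # True; if one fit inside the other, False
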
+
+
+class AssemblyAxialLinkage:
+ """Determines and stores the block- and component-wise axial linkage for an assembly.
+
+ Attributes
+ ----------
+ a : :py:class:`Assembly `
+ reference to original assembly; is directly modified/changed during expansion.
+ linkedBlocks : dict
+ - keys = :py:class:`Block `
+ - values = list of axially linked blocks; index 0 = lower linked block; index 1: upper
+ linked block.
+ linkedComponents : dict
+ - keys = :py:class:`Component `
+ - values = list of axially linked components; index 0 = lower linked component;
+ index 1: upper linked component.
+ """
+
+ def __init__(self, StdAssem):
+ self.a = StdAssem
+ self.linkedBlocks = {}
+ self.linkedComponents = {}
+ self._determineAxialLinkage()
+
+ def _determineAxialLinkage(self):
+ """Gets the block and component based linkage."""
+ for b in self.a:
+ self._getLinkedBlocks(b)
+ for c in getSolidComponents(b):
+ self._getLinkedComponents(b, c)
+
+ def _getLinkedBlocks(self, b):
+ """Retrieve the axial linkage for block b.
+
+ Parameters
+ ----------
+ b : :py:class:`Block `
+ block to determine axial linkage for
+
+ Notes
+ -----
+ - block linkage is determined by matching ztop/zbottom (see below)
+ - block linkage is stored in self.linkedBlocks[b]
+ _ _
+ | |
+ | 2 | Block 2 is linked to block 1.
+ |_ _|
+ | |
+ | 1 | Block 1 is linked to both block 0 and block 2.
+ |_ _|
+ | |
+ | 0 | Block 0 is linked to block 1.
+ |_ _|
+ """
+ lowerLinkedBlock = None
+ upperLinkedBlock = None
+ block_list = self.a.getChildren()
+ for otherBlk in block_list:
+ if b.name != otherBlk.name:
+ if b.p.zbottom == otherBlk.p.ztop:
+ lowerLinkedBlock = otherBlk
+ elif b.p.ztop == otherBlk.p.zbottom:
+ upperLinkedBlock = otherBlk
+
+ self.linkedBlocks[b] = [lowerLinkedBlock, upperLinkedBlock]
+
+ if lowerLinkedBlock is None:
+ runLog.debug(
+ "Assembly {0:22s} at location {1:22s}, Block {2:22s}"
+ "is not linked to a block below!".format(
+ str(self.a.getName()),
+ str(self.a.getLocation()),
+ str(b.p.flags),
+ ),
+ single=True,
+ )
+ if upperLinkedBlock is None:
+ runLog.debug(
+ "Assembly {0:22s} at location {1:22s}, Block {2:22s}"
+ "is not linked to a block above!".format(
+ str(self.a.getName()),
+ str(self.a.getLocation()),
+ str(b.p.flags),
+ ),
+ single=True,
+ )
+
+ def _getLinkedComponents(self, b, c):
+ """Retrieve the axial linkage for component c.
+
+ Parameters
+ ----------
+ b : :py:class:`Block `
+ key to access blocks containing linked components
+ c : :py:class:`Component `
+ component to determine axial linkage for
+
+ Raises
+ ------
+ RuntimeError
+ multiple candidate components are found to be axially linked to a component
+ """
+ lstLinkedC = [None, None]
+ for ib, linkdBlk in enumerate(self.linkedBlocks[b]):
+ if linkdBlk is not None:
+ for otherC in getSolidComponents(linkdBlk.getChildren()):
+ if _determineLinked(c, otherC):
+ if lstLinkedC[ib] is not None:
+ errMsg = (
+ "Multiple component axial linkages have been found for "
+ f"Component {c}; Block {b}; Assembly {b.parent}."
+ " This is indicative of an error in the blueprints! Linked "
+ f"components found are {lstLinkedC[ib]} and {otherC}"
+ )
+ runLog.error(msg=errMsg)
+ raise RuntimeError(errMsg)
+ lstLinkedC[ib] = otherC
+
+ self.linkedComponents[c] = lstLinkedC
+
+ if lstLinkedC[0] is None:
+ runLog.debug(
+ f"Assembly {self.a}, Block {b}, Component {c} has nothing linked below it!",
+ single=True,
+ )
+ if lstLinkedC[1] is None:
+ runLog.debug(
+ f"Assembly {self.a}, Block {b}, Component {c} has nothing linked above it!",
+ single=True,
+ )
diff --git a/armi/reactor/converters/axialExpansionChanger/axialExpansionChanger.py b/armi/reactor/converters/axialExpansionChanger/axialExpansionChanger.py
new file mode 100644
index 000000000..d7369d258
--- /dev/null
+++ b/armi/reactor/converters/axialExpansionChanger/axialExpansionChanger.py
@@ -0,0 +1,421 @@
+# Copyright 2019 TerraPower, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Enable component-wise axial expansion for assemblies and/or a reactor."""
+
+from numpy import array
+
+from armi import runLog
+from armi.reactor.converters.axialExpansionChanger.assemblyAxialLinkage import (
+ AssemblyAxialLinkage,
+)
+from armi.reactor.converters.axialExpansionChanger.expansionData import (
+ ExpansionData,
+ getSolidComponents,
+)
+from armi.reactor.flags import Flags
+
+
+def getDefaultReferenceAssem(assems):
+ """Return a default reference assembly."""
+ # if assemblies are defined in blueprints, handle meshing
+ # assume finest mesh is reference
+ assemsByNumBlocks = sorted(
+ assems,
+ key=lambda a: len(a),
+ reverse=True,
+ )
+ return assemsByNumBlocks[0] if assemsByNumBlocks else None
+
+
+def makeAssemsAbleToSnapToUniformMesh(
+ assems, nonUniformAssemFlags, referenceAssembly=None
+):
+ """Make this set of assemblies aware of the reference mesh so they can stay uniform as they axially expand."""
+ if not referenceAssembly:
+ referenceAssembly = getDefaultReferenceAssem(assems)
+ # make the snap lists so assems know how to expand
+ nonUniformAssems = [Flags.fromStringIgnoreErrors(t) for t in nonUniformAssemFlags]
+ for a in assems:
+ if any(a.hasFlags(f) for f in nonUniformAssems):
+ continue
+ a.makeAxialSnapList(referenceAssembly)
+
+
+class AxialExpansionChanger:
+ """
+ Axially expand or contract assemblies or an entire core.
+
+ Attributes
+ ----------
+ linked: :py:class:`AssemblyAxialLinkage`
+ establishes object containing axial linkage information
+ expansionData: :py:class:`ExpansionData `
+ establishes object to store and access relevant expansion data
+
+ Notes
+ -----
+ - Is designed to work with general, vertically oriented, pin-type assembly designs. It is not set up to account
+ for any other assembly type.
+ - Useful for fuel performance, thermal expansion, reactivity coefficients, etc.
+ """
+
+ def __init__(self, detailedAxialExpansion: bool = False):
+ """
+ Build an axial expansion converter.
+
+ Parameters
+ ----------
+ detailedAxialExpansion : bool, optional
+ A boolean to indicate whether or not detailedAxialExpansion is to be utilized.
+ """
+ self._detailedAxialExpansion = detailedAxialExpansion
+ self.linked = None
+ self.expansionData = None
+
+ @classmethod
+ def expandColdDimsToHot(
+ cls,
+ assems: list,
+ isDetailedAxialExpansion: bool,
+ referenceAssembly=None,
+ ):
+ """Expand BOL assemblies, resolve disjoint axial mesh (if needed), and update block BOL heights.
+
+ .. impl:: Perform expansion during core construction based on block heights at a specified temperature.
+ :id: I_ARMI_INP_COLD_HEIGHT
+ :implements: R_ARMI_INP_COLD_HEIGHT
+
+ This method is designed to be used during core construction to axially thermally expand the
+ assemblies to their "hot" temperatures (as determined by ``Thot`` values in blueprints).
+ First, the Assembly is prepared for axial expansion via ``setAssembly``. In
+ ``applyColdHeightMassIncrease``, the number densities on each Component are adjusted to
+ reflect that Assembly inputs are at cold (i.e., ``Tinput``) temperatures. To expand to
+ the requested hot temperatures, thermal expansion factors are then computed in
+ ``computeThermalExpansionFactors``. Finally, the Assembly is axially thermally expanded in
+ ``axiallyExpandAssembly``.
+
+ If the setting ``detailedAxialExpansion`` is ``False``, then each Assembly gets its Block mesh
+ set to match that of the "reference" Assembly (see ``getDefaultReferenceAssem`` and ``setBlockMesh``).
+
+ Once the Assemblies are axially expanded, the Block BOL heights are updated. To account for the change in
+ Block volume from axial expansion, ``completeInitialLoading`` is called to update any volume-dependent
+ Block information.
+
+ Parameters
+ ----------
+ assems: list[:py:class:`Assembly `]
+ list of assemblies to be thermally expanded
+ isDetailedAxialExpansion: bool
+ If False, assemblies will be forced to conform to the reference mesh after expansion
+ referenceAssembly: :py:class:`Assembly `, optional
+ Assembly whose mesh other meshes will conform to if isDetailedAxialExpansion is False.
+ If not provided, will assume the finest mesh assembly which is typically fuel.
+
+ Notes
+ -----
+ Calling this method will result in an increase in mass via applyColdHeightMassIncrease!
+
+ See Also
+ --------
+ :py:meth:`armi.reactor.converters.axialExpansionChanger.axialExpansionChanger.AxialExpansionChanger.applyColdHeightMassIncrease`
+ """
+ assems = list(assems)
+ if not referenceAssembly:
+ referenceAssembly = getDefaultReferenceAssem(assems)
+ axialExpChanger = cls(isDetailedAxialExpansion)
+ for a in assems:
+ axialExpChanger.setAssembly(a, expandFromTinputToThot=True)
+ axialExpChanger.applyColdHeightMassIncrease()
+ axialExpChanger.expansionData.computeThermalExpansionFactors()
+ axialExpChanger.axiallyExpandAssembly()
+ if not isDetailedAxialExpansion:
+ for a in assems:
+ a.setBlockMesh(referenceAssembly.getAxialMesh())
+ # update block BOL heights to reflect hot heights
+ for a in assems:
+ for b in a:
+ b.p.heightBOL = b.getHeight()
+ b.completeInitialLoading()
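
A usage sketch for the new classmethod (``r`` is assumed to be a loaded Reactor; not part of this patch):

# Expand all BOL assemblies from cold input heights to hot dimensions,
# then snap every assembly to the reference (finest-mesh) assembly.
assems = r.core.getAssemblies()
AxialExpansionChanger.expandColdDimsToHot(
    assems,
    isDetailedAxialExpansion=False,
)
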
+
+ def performPrescribedAxialExpansion(
+ self, a, componentLst: list, percents: list, setFuel=True
+ ):
+ """Perform axial expansion/contraction of an assembly given prescribed expansion percentages.
+
+ .. impl:: Perform expansion/contraction, given a list of components and expansion coefficients.
+ :id: I_ARMI_AXIAL_EXP_PRESC
+ :implements: R_ARMI_AXIAL_EXP_PRESC
+
+ This method performs component-wise axial expansion for an Assembly given expansion coefficients
+ and a corresponding list of Components. In ``setAssembly``, the Assembly is prepared
+ for axial expansion by determining Component-wise axial linkage and checking to see if a dummy Block
+ is in place (necessary for ensuring conservation properties). The provided expansion factors are
+ then assigned to their corresponding Components in ``setExpansionFactors``. Finally, the axial
+ expansion is performed in ``axiallyExpandAssembly``.
+
+ Parameters
+ ----------
+ a : :py:class:`Assembly `
+ ARMI assembly to be changed
+ componentLst : list[:py:class:`Component `]
+ list of Components to be expanded
+ percents : list[float]
+ list of expansion percentages for each component listed in componentList
+ setFuel : boolean, optional
+ Boolean to determine whether or not fuel blocks should have their target components set
+ This is useful when target components within a fuel block need to be determined on-the-fly.
+
+ Notes
+ -----
+ - percents may be positive (expansion) or negative (contraction)
+ """
+ self.setAssembly(a, setFuel)
+ self.expansionData.setExpansionFactors(componentLst, percents)
+ self.axiallyExpandAssembly()
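
An illustrative call (assumed assembly ``a``; the factors are interpreted as L1/L0 height ratios per ``setExpansionFactors``):

from armi.reactor.flags import Flags

# Grow the fuel component in each fuel block by 1%.
changer = AxialExpansionChanger()
fuelComponents = [b.getComponent(Flags.FUEL) for b in a.getBlocks(Flags.FUEL)]
factors = [1.01] * len(fuelComponents)
changer.performPrescribedAxialExpansion(a, fuelComponents, factors)
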
+
+ def performThermalAxialExpansion(
+ self,
+ a,
+ tempGrid: list,
+ tempField: list,
+ setFuel: bool = True,
+ expandFromTinputToThot: bool = False,
+ ):
+ """Perform thermal expansion/contraction for an assembly given an axial temperature grid and
+ field.
+
+ .. impl:: Perform thermal expansion/contraction, given an axial temperature distribution
+ over an assembly.
+ :id: I_ARMI_AXIAL_EXP_THERM
+ :implements: R_ARMI_AXIAL_EXP_THERM
+
+ This method performs component-wise thermal expansion for an assembly given a discrete
+ temperature distribution over the axial length of the Assembly. In ``setAssembly``, the
+ Assembly is prepared for axial expansion by determining Component-wise axial linkage and
+ checking to see if a dummy Block is in place (necessary for ensuring conservation
+ properties). The discrete temperature distribution is then leveraged to update Component
+ temperatures and compute thermal expansion factors (via
+ ``updateComponentTempsBy1DTempField`` and ``computeThermalExpansionFactors``,
+ respectively). Finally, the axial expansion is performed in ``axiallyExpandAssembly``.
+
+ Parameters
+ ----------
+ a : :py:class:`Assembly `
+ ARMI assembly to be changed
+ tempGrid : float, list
+ Axial temperature grid (in cm) (i.e., physical locations where temp is stored)
+ tempField : float, list
+ Temperature values (in C) along grid
+ setFuel : boolean, optional
+ Boolean to determine whether or not fuel blocks should have their target components set
+ This is useful when target components within a fuel block need to be determined on-the-fly.
+ expandFromTinputToThot: bool
+ determines if thermal expansion factors should be calculated from c.inputTemperatureInC
+ to c.temperatureInC (True) or some other reference temperature and c.temperatureInC (False)
+ """
+ self.setAssembly(a, setFuel, expandFromTinputToThot)
+ self.expansionData.updateComponentTempsBy1DTempField(tempGrid, tempField)
+ self.expansionData.computeThermalExpansionFactors()
+ self.axiallyExpandAssembly()
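
A sketch with made-up data (the grid must be fine enough that every block contains at least one point, else ``updateComponentTempsBy1DTempField`` raises a ValueError):

# Apply a linear axial temperature profile to assembly `a` and expand.
tempGrid = [0.0, 25.0, 50.0, 75.0, 100.0]        # cm
tempField = [400.0, 450.0, 500.0, 550.0, 600.0]  # C
changer = AxialExpansionChanger()
changer.performThermalAxialExpansion(a, tempGrid, tempField)
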
+
+ def reset(self):
+ self.linked = None
+ self.expansionData = None
+
+ def setAssembly(self, a, setFuel=True, expandFromTinputToThot=False):
+ """Set the armi assembly to be changed and init expansion data class for assembly.
+
+ Parameters
+ ----------
+ a : :py:class:`Assembly `
+ ARMI assembly to be changed
+ setFuel : boolean, optional
+ Boolean to determine whether or not fuel blocks should have their target components set
+ This is useful when target components within a fuel block need to be determined on-the-fly.
+ expandFromTinputToThot: bool
+ determines if thermal expansion factors should be calculated from c.inputTemperatureInC
+ to c.temperatureInC (True) or some other reference temperature and c.temperatureInC (False)
+
+ Notes
+ -----
+ When considering thermal expansion, if there is an axial temperature distribution on the
+ assembly, the axial expansion methodology will NOT perfectly preserve mass. The magnitude of
+ the gradient of the temperature distribution is the primary factor in determining the
+ cumulative loss of mass conservation.
+ """
+ self.linked = AssemblyAxialLinkage(a)
+ self.expansionData = ExpansionData(
+ a, setFuel=setFuel, expandFromTinputToThot=expandFromTinputToThot
+ )
+ self._isTopDummyBlockPresent()
+
+ def applyColdHeightMassIncrease(self):
+ """
+ Increase component mass because they are declared at cold dims.
+
+ Notes
+ -----
+ A component that is 1 cm tall at its cold temperature holds more mass than the
+ same component expanded to a hot height of 1 cm. This should be
+ called when the setting `inputHeightsConsideredHot` is used. This adjusts
+ the expansion factor applied during applyMaterialMassFracsToNumberDensities.
+ """
+ for c in self.linked.a.getComponents():
+ axialExpansionFactor = 1.0 + c.material.linearExpansionFactor(
+ c.temperatureInC, c.inputTemperatureInC
+ )
+ c.changeNDensByFactor(axialExpansionFactor)
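
Back-of-the-envelope illustration of the factor (material data assumed, not from this patch):

# For a mean linear expansion coefficient of ~2e-5 1/C between Tinput = 20 C
# and Thot = 600 C, the number densities grow by roughly 1.2%:
alpha, dT = 2.0e-5, 600.0 - 20.0
axialExpansionFactor = 1.0 + alpha * dT   # ~1.0116
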
+
+ def _isTopDummyBlockPresent(self):
+ """Determines if top most block of assembly is a dummy block.
+
+ Notes
+ -----
+ - If true, then axial expansion will be physical for all blocks.
+ - If false, the top most block in the assembly is artificially chopped
+ to preserve the assembly height, and a runLog warning is also issued.
+ """
+ blkLst = self.linked.a.getBlocks()
+ if not blkLst[-1].hasFlags(Flags.DUMMY):
+ runLog.warning(
+ f"No dummy block present at the top of {self.linked.a}! "
+ "Top most block will be artificially chopped "
+ "to preserve assembly height"
+ )
+ if self._detailedAxialExpansion:
+ msg = "Cannot run detailedAxialExpansion without a dummy block at the top of the assembly!"
+ runLog.error(msg)
+ raise RuntimeError(msg)
+
+ def axiallyExpandAssembly(self):
+ """Utilizes assembly linkage to do axial expansion.
+
+ .. impl:: Preserve the total height of an ARMI assembly, during expansion.
+ :id: I_ARMI_ASSEM_HEIGHT_PRES
+ :implements: R_ARMI_ASSEM_HEIGHT_PRES
+
+ The total height of an Assembly is preserved by not changing the ``ztop`` position
+ of the top-most Block in an Assembly. The ``zbottom`` of the top-most Block is
+            adjusted to match the Block immediately below it. The ``height`` of the
+            top-most Block is then updated to reflect any expansion/contraction.
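+
+            For example, if the Blocks below the dummy Block grow by a net 2 cm, the
+            dummy Block's ``zbottom`` rises 2 cm and its ``height`` shrinks 2 cm, so
+            the total Assembly height is unchanged.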
+ """
+ mesh = [0.0]
+ numOfBlocks = self.linked.a.countBlocksWithFlags()
+ runLog.debug(
+            "Printing component expansion information (growth percentage and 'target component') "
+ f"for each block in assembly {self.linked.a}."
+ )
+ for ib, b in enumerate(self.linked.a):
+ runLog.debug(msg=f" Block {b}")
+ blockHeight = b.getHeight()
+ # set bottom of block equal to top of block below it
+ # if ib == 0, leave block bottom = 0.0
+ if ib > 0:
+ b.p.zbottom = self.linked.linkedBlocks[b][0].p.ztop
+ isDummyBlock = ib == (numOfBlocks - 1)
+ if not isDummyBlock:
+ for c in getSolidComponents(b):
+ growFrac = self.expansionData.getExpansionFactor(c)
+ runLog.debug(msg=f" Component {c}, growFrac = {growFrac:.4e}")
+ c.height = growFrac * blockHeight
+ # align linked components
+ if ib == 0:
+ c.zbottom = 0.0
+ else:
+ if self.linked.linkedComponents[c][0] is not None:
+ # use linked components below
+ c.zbottom = self.linked.linkedComponents[c][0].ztop
+ else:
+ # otherwise there aren't any linked components
+ # so just set the bottom of the component to
+ # the top of the block below it
+ c.zbottom = self.linked.linkedBlocks[b][0].p.ztop
+ c.ztop = c.zbottom + c.height
+                        # update component number densities: scale by 1/growFrac so
+                        # component mass is conserved as its height changes by growFrac
+ newNumberDensities = {
+ nuc: c.getNumberDensity(nuc) / growFrac
+ for nuc in c.getNuclides()
+ }
+ c.setNumberDensities(newNumberDensities)
+ # redistribute block boundaries if on the target component
+ if self.expansionData.isTargetComponent(c):
+ b.p.ztop = c.ztop
+ b.p.height = b.p.ztop - b.p.zbottom
+ else:
+ b.p.height = b.p.ztop - b.p.zbottom
+
+ b.p.z = b.p.zbottom + b.getHeight() / 2.0
+
+ _checkBlockHeight(b)
+ # Call Component.clearCache to update the Component volume, and therefore the masses,
+ # of all components.
+ for c in b:
+ c.clearCache()
+ # redo mesh -- functionality based on assembly.calculateZCoords()
+ mesh.append(b.p.ztop)
+ b.spatialLocator = self.linked.a.spatialGrid[0, 0, ib]
+
+ bounds = list(self.linked.a.spatialGrid._bounds)
+ bounds[2] = array(mesh)
+ self.linked.a.spatialGrid._bounds = tuple(bounds)
+
+ def manageCoreMesh(self, r):
+ """Manage core mesh post assembly-level expansion.
+
+ Parameters
+ ----------
+ r : :py:class:`Reactor `
+ ARMI reactor to have mesh modified
+
+ Notes
+ -----
+ - if no detailedAxialExpansion, then do "cheap" approach to uniformMesh converter.
+ - update average core mesh values with call to r.core.updateAxialMesh()
+ - oldMesh will be None during initial core construction at processLoading as it has not yet
+ been set.
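+
+        A typical post-expansion call site might look like (illustrative sketch; the
+        temperature grid/field names are assumed)::
+
+            chngr = AxialExpansionChanger()
+            for a in r.core.getAssemblies():
+                chngr.performThermalAxialExpansion(a, tempGrid, tempField)
+            chngr.manageCoreMesh(r)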
+ """
+ if not self._detailedAxialExpansion:
+ # loop through again now that the reference is adjusted and adjust the non-fuel assemblies.
+ for a in r.core.getAssemblies():
+ a.setBlockMesh(r.core.refAssem.getAxialMesh())
+
+ oldMesh = r.core.p.axialMesh
+ r.core.updateAxialMesh()
+ if oldMesh:
+ runLog.extra("Updated r.core.p.axialMesh (old, new)")
+ for old, new in zip(oldMesh, r.core.p.axialMesh):
+ runLog.extra(f"{old:.6e}\t{new:.6e}")
+
+
+def _checkBlockHeight(b):
+ """
+ Do some basic block height validation.
+
+ Notes
+ -----
+    3 cm is a presumptive lower threshold for DIF3D
+ """
+ if b.getHeight() < 3.0:
+ runLog.debug(
+ "Block {0:s} ({1:s}) has a height less than 3.0 cm. ({2:.12e})".format(
+ b.name, str(b.p.flags), b.getHeight()
+ )
+ )
+
+ if b.getHeight() < 0.0:
+ raise ArithmeticError(
+ "Block {0:s} ({1:s}) has a negative height! ({2:.12e})".format(
+ b.name, str(b.p.flags), b.getHeight()
+ )
+ )
diff --git a/armi/reactor/converters/axialExpansionChanger/expansionData.py b/armi/reactor/converters/axialExpansionChanger/expansionData.py
new file mode 100644
index 000000000..bf8259d97
--- /dev/null
+++ b/armi/reactor/converters/axialExpansionChanger/expansionData.py
@@ -0,0 +1,310 @@
+# Copyright 2024 TerraPower, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Data container for axial expansion."""
+
+from statistics import mean
+from typing import List
+
+from armi import runLog
+from armi.materials import material
+from armi.reactor.flags import Flags
+
+TARGET_FLAGS_IN_PREFERRED_ORDER = [
+ Flags.FUEL,
+ Flags.CONTROL,
+ Flags.POISON,
+ Flags.SHIELD,
+ Flags.SLUG,
+]
+
+
+def getSolidComponents(b):
+ """
+ Return list of components in the block that have solid material.
+
+ Notes
+ -----
+ Axial expansion only needs to be applied to solid materials. We should not update
+ number densities on fluid materials to account for changes in block height.
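+
+    Example (illustrative)::
+
+        solids = getSolidComponents(b)  # e.g., fuel/clad/duct, excluding coolant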
+ """
+ return [c for c in b if not isinstance(c.material, material.Fluid)]
+
+
+class ExpansionData:
+ """Data container for axial expansion."""
+
+ def __init__(self, a, setFuel: bool, expandFromTinputToThot: bool):
+ """
+ Parameters
+ ----------
+ a: :py:class:`Assembly `
+ Assembly to assign component-wise expansion data to
+        setFuel : bool
+            Used to determine if the fuel component should be set as the
+            axial expansion target component during initialization.
+            See self._isFuelLocked.
+        expandFromTinputToThot : bool
+            Determines if thermal expansion factors should be calculated
+            from c.inputTemperatureInC to c.temperatureInC (True) or from some other
+            reference temperature to c.temperatureInC (False).
+ """
+ self._a = a
+ self.componentReferenceTemperature = {}
+ self._expansionFactors = {}
+ self._componentDeterminesBlockHeight = {}
+ self._setTargetComponents(setFuel)
+ self.expandFromTinputToThot = expandFromTinputToThot
+
+ def setExpansionFactors(self, componentLst: List, expFrac: List):
+ """Sets user defined expansion fractions.
+
+ Parameters
+ ----------
+ componentLst : List[:py:class:`Component `]
+ list of Components to have their heights changed
+ expFrac : List[float]
+ list of L1/L0 height changes that are to be applied to componentLst
+
+ Raises
+ ------
+ RuntimeError
+ If componentLst and expFrac are different lengths
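+
+        Example (illustrative; ``fuelComps`` is assumed to be a list of fuel Components)::
+
+            expFracs = [1.01] * len(fuelComps)  # prescribe 1% axial growth
+            data.setExpansionFactors(fuelComps, expFracs)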
+ """
+ if len(componentLst) != len(expFrac):
+ runLog.error(
+ "Number of components and expansion fractions must be the same!\n"
+ f" len(componentLst) = {len(componentLst)}\n"
+ f" len(expFrac) = {len(expFrac)}"
+ )
+ raise RuntimeError
+ if 0.0 in expFrac:
+ msg = (
+                "An expansion fraction, L1/L0, equal to 0.0 is not physical. Expansion fractions "
+ "should be greater than 0.0."
+ )
+ runLog.error(msg)
+ raise RuntimeError(msg)
+ for exp in expFrac:
+ if exp < 0.0:
+ msg = (
+ "A negative expansion fraction, L1/L0, is not physical. Expansion fractions "
+ "should be greater than 0.0."
+ )
+ runLog.error(msg)
+ raise RuntimeError(msg)
+ for c, p in zip(componentLst, expFrac):
+ self._expansionFactors[c] = p
+
+ def updateComponentTempsBy1DTempField(self, tempGrid, tempField):
+ """Assign a block-average axial temperature to components.
+
+ Parameters
+ ----------
+ tempGrid : numpy array
+ 1D axial temperature grid (i.e., physical locations where temp is stored)
+ tempField : numpy array
+ temperature values along grid
+
+ Notes
+ -----
+ - given a 1D axial temperature grid and distribution, searches for temperatures that fall
+ within the bounds of a block, and averages them
+ - this average temperature is then passed to self.updateComponentTemp()
+
+ Raises
+ ------
+ ValueError
+ if no temperature points found within a block
+ RuntimeError
+ if tempGrid and tempField are different lengths
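+
+        Example (illustrative)::
+
+            import numpy as np
+
+            tempGrid = np.linspace(0.0, 100.0, 21)  # axial locations (cm)
+            tempField = np.full(21, 450.0)  # a uniform 450 C distribution
+            data.updateComponentTempsBy1DTempField(tempGrid, tempField)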
+ """
+ if len(tempGrid) != len(tempField):
+ runLog.error("tempGrid and tempField must have the same length.")
+ raise RuntimeError
+
+ self.componentReferenceTemperature = {} # reset, just to be safe
+ for b in self._a:
+ tmpMapping = []
+ for idz, z in enumerate(tempGrid):
+ if b.p.zbottom <= z <= b.p.ztop:
+ tmpMapping.append(tempField[idz])
+ if z > b.p.ztop:
+ break
+
+ if len(tmpMapping) == 0:
+ raise ValueError(
+                    f"{b} has no temperature points within it! "
+ "Likely need to increase the refinement of the temperature grid."
+ )
+
+ blockAveTemp = mean(tmpMapping)
+ for c in b:
+ self.updateComponentTemp(c, blockAveTemp)
+
+ def updateComponentTemp(self, c, temp: float):
+ """Update component temperatures with a provided temperature.
+
+ Parameters
+ ----------
+ c : :py:class:`Component `
+ component to which the temperature, temp, is to be applied
+ temp : float
+ new component temperature in C
+
+ Notes
+ -----
+ - "reference" height and temperature are the current states; i.e. before
+ 1) the new temperature, temp, is applied to the component, and
+ 2) the component is axially expanded
+ """
+ self.componentReferenceTemperature[c] = c.temperatureInC
+ c.setTemperature(temp)
+
+ def computeThermalExpansionFactors(self):
+ """Computes expansion factors for all components via thermal expansion."""
+ for b in self._a:
+ for c in getSolidComponents(b):
+ if self.expandFromTinputToThot:
+ # get thermal expansion factor between c.inputTemperatureInC & c.temperatureInC
+ self._expansionFactors[c] = c.getThermalExpansionFactor()
+ elif c in self.componentReferenceTemperature:
+ growFrac = c.getThermalExpansionFactor(
+ T0=self.componentReferenceTemperature[c]
+ )
+ self._expansionFactors[c] = growFrac
+ else:
+ # We want expansion factors relative to componentReferenceTemperature not
+ # Tinput. But for this component there isn't a componentReferenceTemperature, so
+ # we'll assume that the expansion factor is 1.0.
+ self._expansionFactors[c] = 1.0
+
+ def getExpansionFactor(self, c):
+ """Retrieves expansion factor for c.
+
+ Parameters
+ ----------
+ c : :py:class:`Component `
+            Component to retrieve expansion factor for
+ """
+ value = self._expansionFactors.get(c, 1.0)
+ return value
+
+ def _setTargetComponents(self, setFuel):
+ """Sets target component for each block.
+
+ Parameters
+ ----------
+ setFuel : bool
+            boolean to determine if fuel block should have its target component set. Useful when
+            target components should be determined on the fly.
+ """
+ for b in self._a:
+ if b.p.axialExpTargetComponent:
+ self._componentDeterminesBlockHeight[
+ b.getComponentByName(b.p.axialExpTargetComponent)
+ ] = True
+ elif b.hasFlags(Flags.PLENUM) or b.hasFlags(Flags.ACLP):
+ self.determineTargetComponent(b, Flags.CLAD)
+ elif b.hasFlags(Flags.DUMMY):
+ self.determineTargetComponent(b, Flags.COOLANT)
+ elif setFuel and b.hasFlags(Flags.FUEL):
+ self._isFuelLocked(b)
+ else:
+ self.determineTargetComponent(b)
+
+ def determineTargetComponent(self, b, flagOfInterest=None):
+        """Determines the target component, stores it on the block, and registers it in
+        self._componentDeterminesBlockHeight.
+
+ Parameters
+ ----------
+ b : :py:class:`Block `
+ block to specify target component for
+ flagOfInterest : :py:class:`Flags `
+ the flag of interest to identify the target component
+
+ Notes
+ -----
+ - if flagOfInterest is None, finds the component within b that contains flags that
+ are defined in a preferred order of flags, or barring that, in b.p.flags
+ - if flagOfInterest is not None, finds the component that contains the flagOfInterest.
+
+ Raises
+ ------
+ RuntimeError
+ no target component found
+ RuntimeError
+ multiple target components found
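+
+        Example (illustrative)::
+
+            # prefer the clad as the target component in a plenum block
+            data.determineTargetComponent(plenumBlock, flagOfInterest=Flags.CLAD)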
+ """
+ if flagOfInterest is None:
+ # Follow expansion of most neutronically important component, fuel then control/poison
+ for targetFlag in TARGET_FLAGS_IN_PREFERRED_ORDER:
+ componentWFlag = [c for c in b.getChildren() if c.hasFlags(targetFlag)]
+ if componentWFlag != []:
+ break
+ # some blocks/components are not included in the above list but should still be found
+ if not componentWFlag:
+ componentWFlag = [c for c in b.getChildren() if c.p.flags in b.p.flags]
+ else:
+ componentWFlag = [c for c in b.getChildren() if c.hasFlags(flagOfInterest)]
+ if len(componentWFlag) == 0:
+            # if only 1 solid, be smart enough to snag it
+ solidMaterials = list(
+ c for c in b if not isinstance(c.material, material.Fluid)
+ )
+ if len(solidMaterials) == 1:
+ componentWFlag = solidMaterials
+ if len(componentWFlag) == 0:
+ raise RuntimeError(f"No target component found!\n Block {b}")
+ if len(componentWFlag) > 1:
+ raise RuntimeError(
+                "Cannot have more than one component within a block that has the target flag!\n"
+ f"Block {b}\nflagOfInterest {flagOfInterest}\nComponents {componentWFlag}"
+ )
+ self._componentDeterminesBlockHeight[componentWFlag[0]] = True
+ b.p.axialExpTargetComponent = componentWFlag[0].name
+
+ def _isFuelLocked(self, b):
+ """Physical/realistic implementation reserved for ARMI plugin.
+
+ Parameters
+ ----------
+ b : :py:class:`Block `
+ block to specify target component for
+
+ Raises
+ ------
+ RuntimeError
+ multiple fuel components found within b
+
+ Notes
+ -----
+ - This serves as an example to check for fuel/clad locking/interaction found in SFRs.
+ - A more realistic/physical implementation is reserved for ARMI plugin(s).
+ """
+ c = b.getComponent(Flags.FUEL)
+ if c is None:
+ raise RuntimeError(f"No fuel component within {b}!")
+ self._componentDeterminesBlockHeight[c] = True
+ b.p.axialExpTargetComponent = c.name
+
+ def isTargetComponent(self, c):
+        """Returns True if c is a target component.
+
+ Parameters
+ ----------
+ c : :py:class:`Component `
+ Component to check target component status
+ """
+ return bool(c in self._componentDeterminesBlockHeight)
diff --git a/armi/reactor/converters/blockConverters.py b/armi/reactor/converters/blockConverters.py
index 62f7873f4..dac9a9588 100644
--- a/armi/reactor/converters/blockConverters.py
+++ b/armi/reactor/converters/blockConverters.py
@@ -16,17 +16,15 @@
import copy
import math
-import numpy
import matplotlib
import matplotlib.pyplot as plt
-from matplotlib.patches import Wedge
+import numpy as np
from matplotlib.collections import PatchCollection
+from matplotlib.patches import Wedge
-from armi.reactor import blocks
-from armi.reactor import grids
-from armi.reactor import components
-from armi.reactor.flags import Flags
from armi import runLog
+from armi.reactor import blocks, components, grids
+from armi.reactor.flags import Flags
SIN60 = math.sin(math.radians(60.0))
@@ -141,12 +139,15 @@ def _checkInputs(self, soluteName, solventName, solute, solvent):
"Components are not of compatible shape to be merged "
"solute: {}, solvent: {}".format(solute, solvent)
)
- if solute.getArea() <= 0 or solvent.getArea() <= 0:
+ if solute.getArea() < 0:
raise ValueError(
- "Cannot merge components if either have negative area. "
- "{} area: {}, {} area : {}".format(
- solute, solvent, solute.getArea(), solvent.getArea()
- )
+ "Cannot merge solute with negative area into a solvent. "
+ "{} area: {}".format(solute, solute.getArea())
+ )
+ if solvent.getArea() <= 0:
+ raise ValueError(
+ "Cannot merge into a solvent with negative or 0 area. "
+ "{} area: {}".format(solvent, solvent.getArea())
)
def restablishLinks(self, solute, solvent, soluteLinks):
@@ -311,7 +312,8 @@ def convert(self):
soluteName, self.solventName, minID=self.specifiedMinID
)
solvent = self._sourceBlock.getComponentByName(self.solventName)
- BlockConverter._verifyExpansion(self, self.soluteNames, solvent)
+ if solvent.__class__ is not components.DerivedShape:
+ BlockConverter._verifyExpansion(self, self.soluteNames, solvent)
return self._sourceBlock
@@ -479,7 +481,7 @@ def plotConvertedBlock(self, fName=None):
colors.append(circleComp.density())
colorMap = matplotlib.cm
p = PatchCollection(patches, alpha=1.0, linewidths=0.1, cmap=colorMap.YlGn)
- p.set_array(numpy.array(colors))
+ p.set_array(np.array(colors))
ax.add_collection(p)
ax.autoscale_view(True, True, True)
ax.set_aspect("equal")
@@ -488,6 +490,7 @@ def plotConvertedBlock(self, fName=None):
plt.savefig(fName)
else:
plt.show()
+ plt.close()
return fName
diff --git a/armi/reactor/converters/geometryConverters.py b/armi/reactor/converters/geometryConverters.py
index ed1ba1011..83944dc2c 100644
--- a/armi/reactor/converters/geometryConverters.py
+++ b/armi/reactor/converters/geometryConverters.py
@@ -33,7 +33,7 @@
import matplotlib
import matplotlib.pyplot as plt
-import numpy
+import numpy as np
from armi import materials
from armi import runLog
@@ -267,7 +267,7 @@ def addRing(self, assemType="big shield"):
# first look through the core and finds the one farthest from the center
maxDist = 0.0
for assem in r.core.getAssemblies():
- dist = numpy.linalg.norm(
+ dist = np.linalg.norm(
assem.spatialLocator.getGlobalCoordinates()
) # get distance from origin
dist = round(
@@ -301,7 +301,7 @@ def addRing(self, assemType="big shield"):
assem = r.core.childrenByLocator.get(
locator
) # check on assemblies, moving radially outward
- dist = numpy.linalg.norm(locator.getGlobalCoordinates())
+ dist = np.linalg.norm(locator.getGlobalCoordinates())
dist = round(dist, 6)
if dist <= newRingDist: # check distance
if assem is None: # no assembly in that position, add assembly
@@ -532,7 +532,7 @@ def convert(self, r):
# replace temporary index-based ring indices with actual radial distances
self.convReactor.core.spatialGrid._bounds = (
self.convReactor.core.spatialGrid._bounds[0],
- numpy.array(radialMeshCm),
+ np.array(radialMeshCm),
self.convReactor.core.spatialGrid._bounds[2],
)
@@ -756,7 +756,7 @@ def _createRadialThetaZone(
thetaIndex, radialIndex, 0
]
newAssembly.p.AziMesh = 2
- newAssembly.spatialGrid = grids.axialUnitGrid(
+ newAssembly.spatialGrid = grids.AxialGrid.fromNCells(
len(self.meshConverter.axialMesh), armiObject=newAssembly
)
@@ -1019,22 +1019,24 @@ def _writeRadialThetaZoneHeader(
)
)
runLog.debug(
- "{} Axial Zone - Axial Height (cm) Block Number Block Type XS ID : Original Hex Block XS ID(s)".format(
- 9 * STR_SPACE
- )
+ "{} Axial Zone - Axial Height (cm) Block Number Block Type XS ID : "
+ "Original Hex Block XS ID(s)".format(9 * STR_SPACE)
)
runLog.debug(
- "{} ---------- - ----------------- ------------ ---------------------- ----- : ---------------------------".format(
- 9 * STR_SPACE
- )
+ "{} ---------- - ----------------- ------------ ---------------------- ----- : "
+ "---------------------------".format(9 * STR_SPACE)
)
def _writeRadialThetaZoneInfo(self, axIdx, axialSegmentHeight, blockObj):
- """Create a summary of the mapping between the converted reactor block ids to the hex reactor block ids."""
+ """
+ Create a summary of the mapping between the converted reactor block ids to the hex
+ reactor block ids.
+ """
self._newBlockNum += 1
hexBlockXsIds = []
for hexBlock in self.blockMap[blockObj]:
hexBlockXsIds.append(hexBlock.getMicroSuffix())
+
runLog.debug(
"{} {:<10} - {:<17.3f} {:<12} {:<22} {:<5} : {}".format(
9 * STR_SPACE,
@@ -1061,14 +1063,13 @@ def plotConvertedReactor(self, fNameBase=None):
Parameters
----------
fNameBase : str, optional
- A name that will form the basis of the N plots that
- are generated by this method. Will get split on extension
- and have numbers added. Should be like ``coreMap.png``.
+ A name that will form the basis of the N plots that are generated by this method. Will
+ get split on extension and have numbers added. Should be like ``coreMap.png``.
Notes
-----
- XTView can be used to view the RZT reactor but this is useful to examine the
- conversion of the hex-z reactor to the rzt reactor.
+ XTView can be used to view the RZT reactor but this is useful to examine the conversion of
+ the hex-z reactor to the rzt reactor.
This makes plots of each individual theta mesh
"""
@@ -1147,6 +1148,7 @@ def plotConvertedReactor(self, fNameBase=None):
else:
figs.append(fig)
innerTheta = outerTheta
+
return figs
def _getReactorMeshCoordinates(self):
@@ -1261,7 +1263,7 @@ def _scaleBlockVolIntegratedParams(self, b, direction):
for param in self.listOfVolIntegratedParamsToScale:
if b.p[param] is None:
continue
- if type(b.p[param]) == list:
+ if type(b.p[param]) is list:
# some params like volume-integrated mg flux are lists
b.p[param] = [op(val, 3) for val in b.p[param]]
else:
@@ -1275,19 +1277,16 @@ def convert(self, r):
:id: I_ARMI_THIRD_TO_FULL_CORE0
:implements: R_ARMI_THIRD_TO_FULL_CORE
- This method first checks if the input reactor is already full core.
- If full-core symmetry is detected, the input reactor is returned.
- If not, it then verifies that the input reactor has the expected one-third
- core symmetry and HEX geometry.
+ This method first checks if the input reactor is already full core. If full-core
+ symmetry is detected, the input reactor is returned. If not, it then verifies that the
+ input reactor has the expected one-third core symmetry and HEX geometry.
- Upon conversion, it loops over the assembly vector of the source
- one-third core model, copies and rotates each source assembly to create
- new assemblies, and adds them on the full-core grid. For the center assembly,
- it modifies its parameters.
+ Upon conversion, it loops over the assembly vector of the source one-third core model,
+ copies and rotates each source assembly to create new assemblies, and adds them on the
+ full-core grid. For the center assembly, it modifies its parameters.
Finally, it sets the domain type to full core.
-
Parameters
----------
sourceReactor : Reactor object
@@ -1380,10 +1379,10 @@ def restorePreviousGeometry(self, r=None):
:id: I_ARMI_THIRD_TO_FULL_CORE1
:implements: R_ARMI_THIRD_TO_FULL_CORE
- This method is a reverse process of the method ``convert``. It converts
- the full-core reactor model back to the original one-third core reactor model by removing
- the added assemblies and changing the parameters of the center
- assembly from full core to one third core.
+ This method is a reverse process of the method ``convert``. It converts the full-core
+ reactor model back to the original one-third core reactor model by removing the added
+ assemblies and changing the parameters of the center assembly from full core to one
+ third core.
"""
r = r or self._sourceReactor
@@ -1426,10 +1425,10 @@ def addEdgeAssemblies(self, core):
:id: I_ARMI_ADD_EDGE_ASSEMS0
:implements: R_ARMI_ADD_EDGE_ASSEMS
- Edge assemblies on the 120-degree symmetric line of a one-third core reactor model are added
- because they are needed for DIF3D-finite difference or MCNP models. This is done
- by copying the assemblies from the lower boundary and placing them in their
- reflective positions on the upper boundary of the symmetry line.
+ Edge assemblies on the 120-degree symmetric line of a one-third core reactor model are
+ added because they are needed for DIF3D-finite difference or MCNP models. This is done
+ by copying the assemblies from the lower boundary and placing them in their reflective
+ positions on the upper boundary of the symmetry line.
Parameters
----------
@@ -1494,16 +1493,15 @@ def removeEdgeAssemblies(self, core):
"""
Remove the edge assemblies in preparation for the nodal diffusion approximation.
- This makes use of the assemblies knowledge of if it is in a region that it
- needs to be removed.
+ This makes use of the assemblies knowledge of if it is in a region that it needs to be
+ removed.
.. impl:: Remove assemblies along the 120-degree line from a reactor.
:id: I_ARMI_ADD_EDGE_ASSEMS1
:implements: R_ARMI_ADD_EDGE_ASSEMS
- This method is the reverse process of the method ``addEdgeAssemblies``. It is
- needed for the DIF3D-Nodal calculation. It removes the assemblies on the 120-degree
- symmetry line.
+ This method is the reverse process of the method ``addEdgeAssemblies``. It is needed for
+ the DIF3D-Nodal calculation. It removes the assemblies on the 120-degree symmetry line.
See Also
--------
@@ -1515,8 +1513,8 @@ def removeEdgeAssemblies(self, core):
assembliesOnLowerBoundary = core.getAssembliesOnSymmetryLine(
grids.BOUNDARY_0_DEGREES
)
- # don't use newAssembliesAdded b/c this may be BOL cleaning of a fresh
- # case that has edge assems
+ # Don't use newAssembliesAdded b/c this may be BOL cleaning of a fresh case that has edge
+ # assems.
edgeAssemblies = core.getAssembliesOnSymmetryLine(grids.BOUNDARY_120_DEGREES)
for a in edgeAssemblies:
runLog.debug(
@@ -1597,7 +1595,7 @@ def _scaleParamsInBlock(b, bSymmetric, completeListOfParamsToScale):
"""Scale volume-integrated params to include their identical symmetric assemblies."""
listOfVolumeIntegratedParamsToScale, fluxParamsToScale = completeListOfParamsToScale
for paramName in [
- pn for pn in listOfVolumeIntegratedParamsToScale if numpy.any(b.p[pn])
+ pn for pn in listOfVolumeIntegratedParamsToScale if np.any(b.p[pn])
]:
runLog.debug(
"Scaling {} in symmetric identical assemblies".format(paramName),
diff --git a/armi/reactor/converters/meshConverters.py b/armi/reactor/converters/meshConverters.py
index ba630f3b8..769770e63 100644
--- a/armi/reactor/converters/meshConverters.py
+++ b/armi/reactor/converters/meshConverters.py
@@ -14,11 +14,11 @@
"""Mesh specifiers update the mesh structure of a reactor by increasing or decreasing the number of mesh coordinates."""
-import math
import collections
+import math
import itertools
-import numpy
+import numpy as np
from armi import runLog
from armi.reactor import grids
@@ -160,7 +160,7 @@ def setThetaMesh(self):
def _generateUniformThetaMesh(self):
"""Create a uniform theta mesh over 2*pi using the user specified number of theta bins."""
self.thetaMesh = list(
- numpy.linspace(0, 2 * math.pi, self._numThetaMeshBins + 1)[1:]
+ np.linspace(0, 2 * math.pi, self._numThetaMeshBins + 1)[1:]
)
def _generateNonUniformThetaMesh(self):
@@ -396,7 +396,7 @@ def checkLastValueInList(
msg = "The last value in {} is {} and should be {}".format(
listName, inputList[-1], expectedValue
)
- if not numpy.isclose(inputList[-1], expectedValue, eps):
+ if not np.isclose(inputList[-1], expectedValue, eps):
if adjustLastValue:
del inputList[-1]
inputList.append(expectedValue)
diff --git a/armi/reactor/converters/parameterSweeps/tests/test_paramSweepConverters.py b/armi/reactor/converters/parameterSweeps/tests/test_paramSweepConverters.py
index 63fd66c00..4592cca61 100644
--- a/armi/reactor/converters/parameterSweeps/tests/test_paramSweepConverters.py
+++ b/armi/reactor/converters/parameterSweeps/tests/test_paramSweepConverters.py
@@ -32,7 +32,9 @@
class TestParamSweepConverters(unittest.TestCase):
def setUp(self):
- self.o, self.r = loadTestReactor(TEST_ROOT)
+ self.o, self.r = loadTestReactor(
+ TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
self.cs = self.o.cs
def test_paramSweepConverter(self):
diff --git a/armi/reactor/converters/tests/test_axialExpansionChanger.py b/armi/reactor/converters/tests/test_axialExpansionChanger.py
index 49a2d7cfd..bf69ac64a 100644
--- a/armi/reactor/converters/tests/test_axialExpansionChanger.py
+++ b/armi/reactor/converters/tests/test_axialExpansionChanger.py
@@ -18,6 +18,8 @@
import unittest
from statistics import mean
+from numpy import array, linspace, zeros
+
from armi import materials
from armi.materials import _MATERIAL_NAMESPACE_ORDER, custom
from armi.reactor.assemblies import HexAssembly, grids
@@ -28,14 +30,15 @@
from armi.reactor.converters.axialExpansionChanger import (
AxialExpansionChanger,
ExpansionData,
- _determineLinked,
getSolidComponents,
)
+from armi.reactor.converters.axialExpansionChanger.assemblyAxialLinkage import (
+ _determineLinked,
+)
from armi.reactor.flags import Flags
from armi.reactor.tests.test_reactors import loadTestReactor, reduceTestReactorRings
from armi.tests import TEST_ROOT
from armi.utils import units
-from numpy import array, linspace, zeros
class AxialExpansionTestBase(unittest.TestCase):
@@ -448,10 +451,14 @@ def test_noMovementACLP(self):
.. test:: Ensure the ACLP does not move during fuel-only expansion.
:id: T_ARMI_AXIAL_EXP_PRESC1
:tests: R_ARMI_AXIAL_EXP_PRESC
+
+ .. test:: Ensure the component volumes are correctly updated during prescribed expansion.
+ :id: T_ARMI_AXIAL_EXP_PRESC2
+ :tests: R_ARMI_AXIAL_EXP_PRESC
"""
# build test assembly with ACLP
assembly = HexAssembly("testAssemblyType")
- assembly.spatialGrid = grids.axialUnitGrid(numCells=1)
+ assembly.spatialGrid = grids.AxialGrid.fromNCells(numCells=1)
assembly.spatialGrid.armiObject = assembly
assembly.add(_buildTestBlock("shield", "FakeMat", 25.0, 10.0))
assembly.add(_buildTestBlock("fuel", "FakeMat", 25.0, 10.0))
@@ -470,6 +477,9 @@ def test_noMovementACLP(self):
aclpZTop = aclp.p.ztop
aclpZBottom = aclp.p.zbottom
+ # get total assembly fluid mass pre-expansion
+ preExpAssemFluidMass = self._getTotalAssemblyFluidMass(assembly)
+
# expand fuel
# get fuel components
cList = [c for b in assembly for c in b if c.hasFlags(Flags.FUEL)]
@@ -478,6 +488,9 @@ def test_noMovementACLP(self):
chngr = AxialExpansionChanger()
chngr.performPrescribedAxialExpansion(assembly, cList, pList, setFuel=True)
+ # get total assembly fluid mass post-expansion
+ postExpAssemFluidMass = self._getTotalAssemblyFluidMass(assembly)
+
# do assertion
self.assertEqual(
aclpZBottom,
@@ -490,6 +503,26 @@ def test_noMovementACLP(self):
msg="ACLP ztop has changed. It should not with fuel component only expansion!",
)
+ # verify that the component volumes are correctly updated
+ for b in assembly:
+ for c in b:
+ self.assertAlmostEqual(
+ c.getArea() * b.getHeight(),
+ c.getVolume(),
+ places=12,
+ )
+ # verify that the total assembly fluid mass is preserved through expansion
+ self.assertAlmostEqual(preExpAssemFluidMass, postExpAssemFluidMass, places=11)
+
+ @staticmethod
+ def _getTotalAssemblyFluidMass(assembly) -> float:
+ totalAssemblyFluidMass = 0.0
+ for b in assembly:
+ for c in b:
+ if isinstance(c.material, materials.material.Fluid):
+ totalAssemblyFluidMass += c.getMass()
+ return totalAssemblyFluidMass
+
def test_reset(self):
self.obj.setAssembly(self.a)
self.obj.reset()
@@ -564,7 +597,7 @@ def tearDown(self):
def test_isTopDummyBlockPresent(self):
# build test assembly without dummy
assembly = HexAssembly("testAssemblyType")
- assembly.spatialGrid = grids.axialUnitGrid(numCells=1)
+ assembly.spatialGrid = grids.AxialGrid.fromNCells(numCells=1)
assembly.spatialGrid.armiObject = assembly
assembly.add(_buildTestBlock("shield", "FakeMat", 25.0, 10.0))
assembly.calculateZCoords()
@@ -864,7 +897,8 @@ def test_coldAssemblyExpansion(self):
Notes
-----
For R_ARMI_INP_COLD_HEIGHT, the action of axial expansion occurs in setUp() during core
- construction, specifically in :py:meth:`constructAssem `
+ construction, specifically in
+ :py:meth:`constructAssem `
Two assertions here:
1. total assembly height should be preserved (through use of top dummy block)
@@ -913,14 +947,15 @@ def test_coldAssemblyExpansion(self):
def checkColdHeightBlockMass(
self, bStd: HexBlock, bExp: HexBlock, flagType: Flags, nuclide: str
):
- """Checks that nuclide masses for blocks with input cold heights and "inputHeightsConsideredHot": True are underpredicted.
+ """Checks that nuclide masses for blocks with input cold heights and
+ "inputHeightsConsideredHot": True are underpredicted.
Notes
-----
- If blueprints have cold blocks heights with "inputHeightsConsideredHot": True in the inputs, then
- the nuclide densities are thermally expanded but the block height is not. This ultimately results in
- nuclide masses being underpredicted relative to the case where both nuclide densities and block heights
- are thermally expanded.
+ If blueprints have cold blocks heights with "inputHeightsConsideredHot": True in the inputs,
+ then the nuclide densities are thermally expanded but the block height is not. This
+ ultimately results in nuclide masses being underpredicted relative to the case where both
+ nuclide densities and block heights are thermally expanded.
"""
# custom materials don't expand
if not isinstance(bStd.getComponent(flagType).material, custom.Custom):
@@ -976,7 +1011,8 @@ def runTest(
Notes
-----
- components "typeA" and "typeB" are assumed to be vertically stacked
- - two assertions: 1) comparing "typeB" component to "typeA"; 2) comparing "typeA" component to "typeB"
+ - two assertions: 1) comparing "typeB" component to "typeA"; 2) comparing "typeA" component
+ to "typeB"
- the different assertions are particularly useful for comparing two annuli
- to add Component class types to a test:
Add dictionary entry with following:
@@ -1128,7 +1164,7 @@ def buildTestAssemblyWithFakeMaterial(name: str, hot: bool = False):
height = 10.0 + 0.02 * (250.0 - 25.0)
assembly = HexAssembly("testAssemblyType")
- assembly.spatialGrid = grids.axialUnitGrid(numCells=1)
+ assembly.spatialGrid = grids.AxialGrid.fromNCells(numCells=1)
assembly.spatialGrid.armiObject = assembly
assembly.add(_buildTestBlock("shield", name, hotTemp, height))
assembly.add(_buildTestBlock("fuel", name, hotTemp, height))
diff --git a/armi/reactor/converters/tests/test_blockConverter.py b/armi/reactor/converters/tests/test_blockConverter.py
index 9218a73f0..5cb72f3dc 100644
--- a/armi/reactor/converters/tests/test_blockConverter.py
+++ b/armi/reactor/converters/tests/test_blockConverter.py
@@ -16,20 +16,65 @@
import os
import unittest
-import numpy
+import numpy as np
+from armi.physics.neutronics.isotopicDepletion.isotopicDepletionInterface import (
+ isDepletable,
+)
+from armi.reactor import blocks, components, grids
from armi.reactor.converters import blockConverters
-from armi.reactor import blocks
-from armi.reactor import components
from armi.reactor.flags import Flags
from armi.reactor.tests.test_blocks import loadTestBlock
-from armi.reactor.tests.test_reactors import loadTestReactor, TEST_ROOT
+from armi.reactor.tests.test_reactors import TEST_ROOT, loadTestReactor
from armi.utils import hexagon
-from armi.reactor import grids
from armi.utils.directoryChangers import TemporaryDirectoryChanger
-from armi.physics.neutronics.isotopicDepletion.isotopicDepletionInterface import (
- isDepletable,
-)
+
+
+def buildSimpleFuelBlockNegativeArea():
+ """
+    Return a simple hex block containing fuel, gap, clad, duct, coolant, and intercoolant.
+
+ The block has a negative-area gap between fuel and cladding for testing.
+ """
+ b = blocks.HexBlock("fuel", height=10.0)
+
+ fuelDims = {"Tinput": 25, "Thot": 600, "od": 0.76, "id": 0.00, "mult": 127.0}
+ cladDims = {"Tinput": 25, "Thot": 600, "od": 0.80, "id": 0.76, "mult": 127.0}
+ ductDims = {"Tinput": 25, "Thot": 600, "op": 16, "ip": 15.3, "mult": 1.0}
+ intercoolantDims = {
+ "Tinput": 400,
+ "Thot": 400,
+ "op": 17.0,
+ "ip": ductDims["op"],
+ "mult": 1.0,
+ }
+ coolDims = {"Tinput": 25.0, "Thot": 400}
+
+ fuel = components.Circle("fuel", "UZr", **fuelDims)
+ clad = components.Circle("clad", "HT9", **cladDims)
+ gapDims = {
+ "Tinput": 25,
+ "Thot": 600,
+ "od": "clad.id",
+ "id": "fuel.od",
+ "mult": 127.0,
+ }
+ gapDims["components"] = {"fuel": fuel, "clad": clad}
+ gap = components.Circle("gap", "Void", **gapDims)
+ duct = components.Hexagon("duct", "HT9", **ductDims)
+ coolant = components.DerivedShape("coolant", "Sodium", **coolDims)
+ intercoolant = components.Hexagon("intercoolant", "Sodium", **intercoolantDims)
+
+ b.add(fuel)
+ b.add(gap)
+ b.add(clad)
+ b.add(duct)
+ b.add(coolant)
+ b.add(intercoolant)
+
+ b.getVolumeFractions()
+
+ return b
class TestBlockConverter(unittest.TestCase):
@@ -81,6 +126,41 @@ def _test_dissolve(self, block, soluteName, solventName):
self.assertNotIn(soluteName, convertedBlock.getComponentNames())
self._checkAreaAndComposition(block, convertedBlock)
+ def test_dissolveMultiple(self):
+ """Test dissolving multiple components into another."""
+ self._test_dissolve_multi(loadTestBlock(), ["wire", "clad"], "coolant")
+ self._test_dissolve_multi(
+ loadTestBlock(), ["inner liner", "outer liner"], "clad"
+ )
+
+ def test_dissolveZeroArea(self):
+ """Test dissolving a zero-area component into another."""
+ self._test_dissolve(loadTestBlock(), "gap2", "outer liner")
+
+ def test_dissolveIntoZeroArea(self):
+ """Test dissolving a component into a zero-area solvent (raises ValueError)."""
+ with self.assertRaises(ValueError):
+ self._test_dissolve(loadTestBlock(), "outer liner", "gap2")
+
+ def test_dissolveNegativeArea(self):
+        """Test dissolving a negative-area component into another (raises ValueError)."""
+ with self.assertRaises(ValueError):
+ self._test_dissolve(buildSimpleFuelBlockNegativeArea(), "gap", "clad")
+
+ def test_dissolveIntoNegativeArea(self):
+ """Test dissolving a zero-area component into another."""
+        """Test dissolving a component into a negative-area solvent (raises ValueError)."""
+ self._test_dissolve(buildSimpleFuelBlockNegativeArea(), "clad", "gap")
+
+ def _test_dissolve_multi(self, block, soluteNames, solventName):
+ converter = blockConverters.MultipleComponentMerger(
+ block, soluteNames, solventName
+ )
+ convertedBlock = converter.convert()
+ for soluteName in soluteNames:
+ self.assertNotIn(soluteName, convertedBlock.getComponentNames())
+ self._checkAreaAndComposition(block, convertedBlock)
+
def test_build_NthRing(self):
"""Test building of one ring."""
RING = 6
@@ -326,7 +406,7 @@ class TestToCircles(unittest.TestCase):
def test_fromHex(self):
actualRadii = blockConverters.radiiFromHexPitches([7.47, 7.85, 8.15])
expected = [3.92203, 4.12154, 4.27906]
- self.assertTrue(numpy.allclose(expected, actualRadii, rtol=1e-5))
+ self.assertTrue(np.allclose(expected, actualRadii, rtol=1e-5))
def test_fromRingOfRods(self):
# JOYO-LMFR-RESR-001, rev 1, Table A.2, 5th layer (ring 6)
@@ -334,7 +414,7 @@ def test_fromRingOfRods(self):
0.76 * 5, 6 * 5, [0.28, 0.315]
)
expected = [3.24034, 3.28553, 3.62584, 3.67104]
- self.assertTrue(numpy.allclose(expected, actualRadii, rtol=1e-5))
+ self.assertTrue(np.allclose(expected, actualRadii, rtol=1e-5))
def _buildJoyoFuel():
diff --git a/armi/reactor/converters/tests/test_geometryConverters.py b/armi/reactor/converters/tests/test_geometryConverters.py
index 0e31b84c8..629092693 100644
--- a/armi/reactor/converters/tests/test_geometryConverters.py
+++ b/armi/reactor/converters/tests/test_geometryConverters.py
@@ -39,7 +39,7 @@ def setUp(self):
self.cs = self.o.cs
def test_addRing(self):
- """Tests that the addRing method adds the correct number of fuel assemblies to the test reactor."""
+ """Tests that ``addRing`` adds the correct number of fuel assemblies to the test reactor."""
converter = geometryConverters.FuelAssemNumModifier(self.cs)
converter.numFuelAssems = 7
converter.ringsToAdd = 1 * ["radial shield"]
@@ -48,7 +48,7 @@ def test_addRing(self):
numAssems = len(self.r.core.getAssemblies())
self.assertEqual(
numAssems, 13
- ) # should wind up with 6 reflector assemblies per 1/3rd core
+ ) # should end up with 6 reflector assemblies per 1/3rd Core
locator = self.r.core.spatialGrid.getLocatorFromRingAndPos(4, 1)
shieldtype = self.r.core.childrenByLocator[locator].getType()
self.assertEqual(
@@ -64,7 +64,7 @@ def test_addRing(self):
) # should wind up with 11 reflector assemblies per 1/3rd core
def test_setNumberOfFuelAssems(self):
- """Tests that the setNumberOfFuelAssems method properly changes the number of fuel assemblies."""
+ """Tests that ``setNumberOfFuelAssems`` properly changes the number of fuel assemblies."""
# tests ability to add fuel assemblies
converter = geometryConverters.FuelAssemNumModifier(self.cs)
converter.numFuelAssems = 60
@@ -450,12 +450,12 @@ def test_initNewFullReactor(self):
self.assertEqual(newR.core.symmetry.domain, geometry.DomainType.FULL_CORE)
def test_skipGrowToFullCoreWhenAlreadyFullCore(self):
- """Test that hex core is not modified when third core to full core changer is called on an already full core geometry.
+ """Test that hex core is not modified when third core to full core changer is called on an
+ already full core geometry.
.. test: Convert a one-third core to full core and restore back to one-third core.
:id: T_ARMI_THIRD_TO_FULL_CORE2
:tests: R_ARMI_THIRD_TO_FULL_CORE
-
"""
# Check the initialization of the third core model and convert to a full core
self.assertFalse(self.r.core.isFullCore)
diff --git a/armi/reactor/converters/uniformMesh.py b/armi/reactor/converters/uniformMesh.py
index 8b8e3c129..80771e3e9 100644
--- a/armi/reactor/converters/uniformMesh.py
+++ b/armi/reactor/converters/uniformMesh.py
@@ -32,9 +32,9 @@
.. warning::
- This procedure can cause numerical diffusion in some cases. For example,
+ This procedure can cause numerical diffusion in some cases. For example,
if a control rod tip block has a large coolant block below it, things like peak
- absorption rate can get lost into it. We recalculate some but not all
+ absorption rate can get lost into it. We recalculate some but not all
reaction rates in the re-mapping process based on a flux remapping. To avoid this,
finer meshes will help. Always perform mesh sensitivity studies to ensure appropriate
convergence for your needs.
@@ -58,7 +58,7 @@
import collections
from timeit import default_timer as timer
-import numpy
+import numpy as np
import armi
from armi import runLog
@@ -186,8 +186,8 @@ def _computeAverageAxialMesh(self):
if len(aMesh) == refNumPoints:
allMeshes.append(aMesh)
- averageMesh = average1DWithinTolerance(numpy.array(allMeshes))
- self._commonMesh = numpy.array(averageMesh)
+ averageMesh = average1DWithinTolerance(np.array(allMeshes))
+ self._commonMesh = np.array(averageMesh)
def _decuspAxialMesh(self):
"""
@@ -254,7 +254,7 @@ def _decuspAxialMesh(self):
preference="top",
)
- self._commonMesh = numpy.array(combinedMesh)
+ self._commonMesh = np.array(combinedMesh)
def _filterMesh(
self, meshList, minimumMeshSize, anchorPoints, preference="bottom", warn=False
@@ -369,15 +369,16 @@ class UniformMeshGeometryConverter(GeometryConverter):
Notes
-----
There are several staticmethods available on this class that allow for:
+
- Creation of a new reactor without applying a new uniform axial mesh. See:
- ``
+ ``
- Creation of a new assembly with a new axial mesh applied. See:
- ``
+ ``
- Resetting the parameter state of an assembly back to the defaults for the
- provided block parameters. See:
- ``
+ provided block parameters. See:
+ ``
- Mapping number densities and block parameters between one assembly to
- another. See: ``
+ another. See: ``
This class is meant to be extended for specific physics calculations that require a
uniform mesh. The child types of this class should define custom
@@ -390,9 +391,9 @@ class UniformMeshGeometryConverter(GeometryConverter):
is being applied to prevent the numerical diffusion problem.
- "in" is used when mapping parameters into the uniform assembly
- from the non-uniform assembly.
+ from the non-uniform assembly.
- "out" is used when mapping parameters from the uniform assembly back
- to the non-uniform assembly.
+ to the non-uniform assembly.
.. warning::
If a parameter is calculated by a physics solver while the reactor is in its
@@ -1027,7 +1028,7 @@ def _checkConversion(self):
@staticmethod
def _createNewAssembly(sourceAssembly):
a = sourceAssembly.__class__(sourceAssembly.getType())
- a.spatialGrid = grids.axialUnitGrid(len(sourceAssembly))
+ a.spatialGrid = grids.AxialGrid.fromNCells(len(sourceAssembly))
a.setName(sourceAssembly.getName())
return a
@@ -1391,7 +1392,7 @@ def paramSetter(block, vals, paramNames):
if val is None:
continue
- if isinstance(val, (tuple, list, numpy.ndarray)):
+ if isinstance(val, (tuple, list, np.ndarray)):
ParamMapper._arrayParamSetter(block, [val], [paramName])
else:
ParamMapper._scalarParamSetter(block, [val], [paramName])
@@ -1402,12 +1403,12 @@ def paramGetter(self, block, paramNames):
for paramName in paramNames:
val = block.p[paramName]
# list-like should be treated as a numpy array
- if isinstance(val, (tuple, list, numpy.ndarray)):
- paramVals.append(numpy.array(val) if len(val) > 0 else None)
+ if isinstance(val, (tuple, list, np.ndarray)):
+ paramVals.append(np.array(val) if len(val) > 0 else None)
else:
paramVals.append(val)
- return numpy.array(paramVals, dtype=object)
+ return np.array(paramVals, dtype=object)
@staticmethod
def _scalarParamSetter(block, vals, paramNames):
@@ -1421,7 +1422,7 @@ def _arrayParamSetter(block, arrayVals, paramNames):
for paramName, vals in zip(paramNames, arrayVals):
if vals is None:
continue
- block.p[paramName] = numpy.array(vals)
+ block.p[paramName] = np.array(vals)
def setNumberDensitiesFromOverlaps(block, overlappingBlockInfo):
diff --git a/armi/reactor/flags.py b/armi/reactor/flags.py
index 6366432a8..3386772f9 100644
--- a/armi/reactor/flags.py
+++ b/armi/reactor/flags.py
@@ -185,7 +185,8 @@ def _toString(cls, typeSpec):
-----
This converts a flag from ``Flags.A|B`` to ``'A B'``
"""
- return str(typeSpec).split("{}.".format(cls.__name__))[1].replace("|", " ")
+ strings = str(typeSpec).split("{}.".format(cls.__name__))[1]
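+        # sort so, e.g., Flags.FUEL|Flags.DEPLETABLE always renders as "DEPLETABLE FUEL"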
+ return " ".join(sorted(strings.split("|")))
class Flags(Flag):
@@ -205,9 +206,6 @@ class Flags(Flag):
MEDIUM = auto()
LOW = auto()
- CORE = auto()
- REACTOR = auto()
-
# general kinds of assemblies or blocks
MATERIAL = auto()
FUEL = auto()
diff --git a/armi/reactor/grids/__init__.py b/armi/reactor/grids/__init__.py
index 5daf84c68..fa6fe3c07 100644
--- a/armi/reactor/grids/__init__.py
+++ b/armi/reactor/grids/__init__.py
@@ -80,7 +80,7 @@
from armi.reactor.grids.grid import Grid
from armi.reactor.grids.structuredGrid import StructuredGrid, GridParameters, _tuplify
-from armi.reactor.grids.axial import AxialGrid, axialUnitGrid
+from armi.reactor.grids.axial import AxialGrid
from armi.reactor.grids.cartesian import CartesianGrid
from armi.reactor.grids.hexagonal import HexGrid, COS30, SIN30, TRIANGLES_IN_HEXAGON
from armi.reactor.grids.thetarz import ThetaRZGrid, TAU
diff --git a/armi/reactor/grids/axial.py b/armi/reactor/grids/axial.py
index b297c4596..4cde443f1 100644
--- a/armi/reactor/grids/axial.py
+++ b/armi/reactor/grids/axial.py
@@ -12,9 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, TYPE_CHECKING, NoReturn
-import warnings
-import numpy
+import numpy as np
from armi.reactor.grids.locations import IJType, LocationBase
from armi.reactor.grids.structuredGrid import StructuredGrid
@@ -26,7 +25,7 @@
class AxialGrid(StructuredGrid):
"""1-D grid in the k-direction (z).
- .. note:::
+ .. note::
It is recommended to use :meth:`fromNCells` rather than calling
the ``__init_`` constructor directly
@@ -45,7 +44,7 @@ def fromNCells(
"""
# Need float bounds or else we truncate integers
return cls(
- bounds=(None, None, numpy.arange(numCells + 1, dtype=numpy.float64)),
+ bounds=(None, None, np.arange(numCells + 1, dtype=np.float64)),
armiObject=armiObject,
)
@@ -85,22 +84,3 @@ def pitch(self) -> float:
Pitch in cm
"""
-
-
-def axialUnitGrid(
- numCells: int, armiObject: Optional["ArmiObject"] = None
-) -> AxialGrid:
- """
- Build a 1-D unit grid in the k-direction based on a number of times. Each mesh is 1cm wide.
-
- .. deprecated::
-
- Use :class:`AxialUnitGrid` class instead
-
- """
- warnings.warn(
- "Use grids.AxialGrid class rather than function",
- PendingDeprecationWarning,
- stacklevel=2,
- )
- return AxialGrid.fromNCells(numCells, armiObject)
diff --git a/armi/reactor/grids/cartesian.py b/armi/reactor/grids/cartesian.py
index 6de012184..4971e8ee0 100644
--- a/armi/reactor/grids/cartesian.py
+++ b/armi/reactor/grids/cartesian.py
@@ -14,10 +14,9 @@
import itertools
from typing import Optional, NoReturn, Tuple
-import numpy
+import numpy as np
from armi.reactor import geometry
-
from armi.reactor.grids.locations import IJType
from armi.reactor.grids.structuredGrid import StructuredGrid
@@ -94,7 +93,7 @@ def fromRectangle(
An object in a Composite model that the Grid should be bound to.
"""
unitSteps = ((width, 0.0, 0.0), (0.0, height, 0.0), (0, 0, 0))
- offset = numpy.array((width / 2.0, height / 2.0, 0.0)) if isOffset else None
+ offset = np.array((width / 2.0, height / 2.0, 0.0)) if isOffset else None
return cls(
unitSteps=unitSteps,
unitStepLimits=((-numRings, numRings), (-numRings, numRings), (0, 1)),
@@ -228,12 +227,12 @@ def changePitch(self, xw: float, yw: float):
"""
xwOld = self._unitSteps[0][0]
ywOld = self._unitSteps[1][1]
- self._unitSteps = numpy.array(((xw, 0.0, 0.0), (0.0, yw, 0.0), (0, 0, 0)))[
+ self._unitSteps = np.array(((xw, 0.0, 0.0), (0.0, yw, 0.0), (0, 0, 0)))[
self._stepDims
]
newOffsetX = self._offset[0] * xw / xwOld
newOffsetY = self._offset[1] * yw / ywOld
- self._offset = numpy.array((newOffsetX, newOffsetY, 0.0))
+ self._offset = np.array((newOffsetX, newOffsetY, 0.0))
def getSymmetricEquivalents(self, indices):
symmetry = self.symmetry # construct the symmetry object once up top
diff --git a/armi/reactor/grids/grid.py b/armi/reactor/grids/grid.py
index 6961816b5..fac584260 100644
--- a/armi/reactor/grids/grid.py
+++ b/armi/reactor/grids/grid.py
@@ -14,10 +14,9 @@
from abc import ABC, abstractmethod
from typing import Union, Optional, Hashable, TYPE_CHECKING, Dict, Iterable, Tuple, List
-import numpy
+import numpy as np
from armi.reactor import geometry
-
from armi.reactor.grids.locations import LocationBase, IndexLocation, IJType, IJKType
if TYPE_CHECKING:
@@ -226,7 +225,7 @@ def getCoordinates(
self,
indices: Union[IJKType, List[IJKType]],
nativeCoords: bool = False,
- ) -> numpy.ndarray:
+ ) -> np.ndarray:
pass
@abstractmethod
@@ -238,11 +237,11 @@ def restoreBackup(self):
"""Restore state from backup."""
@abstractmethod
- def getCellBase(self, indices: IJKType) -> numpy.ndarray:
+ def getCellBase(self, indices: IJKType) -> np.ndarray:
"""Return the lower left case of this cell in cm."""
@abstractmethod
- def getCellTop(self, indices: IJKType) -> numpy.ndarray:
+ def getCellTop(self, indices: IJKType) -> np.ndarray:
"""Get the upper right of this cell in cm."""
@staticmethod
diff --git a/armi/reactor/grids/hexagonal.py b/armi/reactor/grids/hexagonal.py
index d3c75a540..76465d82f 100644
--- a/armi/reactor/grids/hexagonal.py
+++ b/armi/reactor/grids/hexagonal.py
@@ -14,11 +14,9 @@
from math import sqrt
from typing import Tuple, List, Optional
-import numpy
+import numpy as np
from armi.reactor import geometry
-from armi.utils import hexagon
-
from armi.reactor.grids.constants import (
BOUNDARY_0_DEGREES,
BOUNDARY_120_DEGREES,
@@ -27,11 +25,12 @@
)
from armi.reactor.grids.locations import IJKType, IJType
from armi.reactor.grids.structuredGrid import StructuredGrid
+from armi.utils import hexagon
COS30 = sqrt(3) / 2.0
SIN30 = 1.0 / 2.0
# going counter-clockwise from "position 1" (top right)
-TRIANGLES_IN_HEXAGON = numpy.array(
+TRIANGLES_IN_HEXAGON = np.array(
[
(+COS30, SIN30),
(+0, 1.0),
@@ -64,43 +63,44 @@ class HexGrid(StructuredGrid):
Notes
-----
In an axial plane (i, j) are as follows (flats up)::
- _____
- / \
- _____/ 0,1 \_____
- / \ / \
- / -1,1 \_____/ 1,0 \
- \ / \ /
- \_____/ 0,0 \_____/
- / \ / \
- / -1,0 \_____/ 1,-1 \
- \ / \ /
- \_____/ 0,-1 \_____/
- \ /
- \_____/
+
+ _____
+ / \
+ _____/ 0,1 \_____
+ / \ / \
+ / -1,1 \_____/ 1,0 \
+ \ / \ /
+ \_____/ 0,0 \_____/
+ / \ / \
+ / -1,0 \_____/ 1,-1 \
+ \ / \ /
+ \_____/ 0,-1 \_____/
+ \ /
+ \_____/
In an axial plane (i, j) are as follows (corners up)::
- / \ / \
- / \ / \
- | 0,1 | 1,0 |
- | | |
- / \ / \ / \
- / \ / \ / \
- | -1,1 | 0,0 | 1,-1 |
- | | | |
- \ / \ / \ /
- \ / \ / \ /
- | -1,0 | 0,-1 |
- | | |
- \ / \ /
- \ / \ /
+ / \ / \
+ / \ / \
+ | 0,1 | 1,0 |
+ | | |
+ / \ / \ / \
+ / \ / \ / \
+ | -1,1 | 0,0 | 1,-1 |
+ | | | |
+ \ / \ / \ /
+ \ / \ / \ /
+ | -1,0 | 0,-1 |
+ | | |
+ \ / \ /
+ \ / \ /
Basic hexagon geometry::
- - pitch = sqrt(3) * side
- - long diagonal = 2 * side
- - Area = (sqrt(3) / 4) * side^2
- - perimeter = 6 * side
+ - pitch = sqrt(3) * side
+ - long diagonal = 2 * side
+ - Area = (sqrt(3) / 4) * side^2
+ - perimeter = 6 * side
"""
@@ -348,22 +348,22 @@ def getIndicesFromRingAndPos(ring: int, pos: int) -> IJType:
-----
In an axial plane, the (ring, position) coordinates are as follows::
- Flat-to-Flat Corners Up
- _____
- / \ / \ / \
- _____/ 2,2 \_____ / \ / \
- / \ / \ | 2,2 | 2,1 |
- / 2,3 \_____/ 2,1 \ | | |
- \ / \ / / \ / \ / \
- \_____/ 1,1 \_____/ / \ / \ / \
- / \ / \ | 2,3 | 1,1 | 2,6 |
- / 2,4 \_____/ 2,6 \ | | | |
- \ / \ / \ / \ / \ /
- \_____/ 2,5 \_____/ \ / \ / \ /
- \ / | 2,4 | 2,5 |
- \_____/ | | |
- \ / \ /
- \ / \ /
+ Flat-to-Flat Corners Up
+ _____
+ / \ / \ / \
+ _____/ 2,2 \_____ / \ / \
+ / \ / \ | 2,2 | 2,1 |
+ / 2,3 \_____/ 2,1 \ | | |
+ \ / \ / / \ / \ / \
+ \_____/ 1,1 \_____/ / \ / \ / \
+ / \ / \ | 2,3 | 1,1 | 2,6 |
+ / 2,4 \_____/ 2,6 \ | | | |
+ \ / \ / \ / \ / \ /
+ \_____/ 2,5 \_____/ \ / \ / \ /
+ \ / | 2,4 | 2,5 |
+ \_____/ | | |
+ \ / \ /
+ \ / \ /
"""
i, j, _edge = HexGrid._indicesAndEdgeFromRingAndPos(ring, pos)
@@ -455,7 +455,7 @@ def _getSymmetricIdenticalsThird(indices) -> List[IJType]:
identicals = [(-i - j, i), (j, -i - j)]
return identicals
- def triangleCoords(self, indices: IJKType) -> numpy.ndarray:
+ def triangleCoords(self, indices: IJKType) -> np.ndarray:
"""
Return 6 coordinate pairs representing the centers of the 6 triangles in a
hexagon centered here.
@@ -500,7 +500,7 @@ def _getRawUnitSteps(pitch, cornersUp=False):
def changePitch(self, newPitchCm: float):
"""Change the hex pitch."""
- unitSteps = numpy.array(HexGrid._getRawUnitSteps(newPitchCm, self.cornersUp))
+ unitSteps = np.array(HexGrid._getRawUnitSteps(newPitchCm, self.cornersUp))
self._unitSteps = unitSteps[self._stepDims]
def locatorInDomain(self, locator, symmetryOverlap: Optional[bool] = False) -> bool:
@@ -540,10 +540,7 @@ def isInFirstThird(self, locator, includeTopEdge=False) -> bool:
# Even ring; upper edge assem included.
maxPos2 += 1
- if pos <= maxPos1 or pos >= maxPos2:
- return True
-
- return False
+ return bool(pos <= maxPos1 or pos >= maxPos2)
def generateSortedHexLocationList(self, nLocs: int):
"""
@@ -568,7 +565,7 @@ def generateSortedHexLocationList(self, nLocs: int):
# round to avoid differences due to floating point math
locList.sort(
key=lambda loc: (
- round(numpy.linalg.norm(loc.getGlobalCoordinates()), 6),
+ round(np.linalg.norm(loc.getGlobalCoordinates()), 6),
loc.i,
loc.j,
)
diff --git a/armi/reactor/grids/locations.py b/armi/reactor/grids/locations.py
index 9bfff1f76..ca9e93e1b 100644
--- a/armi/reactor/grids/locations.py
+++ b/armi/reactor/grids/locations.py
@@ -16,7 +16,7 @@
from abc import ABC, abstractmethod
import math
-import numpy
+import numpy as np
if TYPE_CHECKING:
# Avoid some circular imports
@@ -158,7 +158,7 @@ def associate(self, grid: "Grid"):
@property
@abstractmethod
- def indices(self) -> numpy.ndarray:
+ def indices(self) -> np.ndarray:
"""Get the non-grid indices (i,j,k) of this locator.
This strips off the annoying ``grid`` tagalong which is there to ensure proper
@@ -239,7 +239,7 @@ def parentLocation(self):
return None
@property
- def indices(self) -> numpy.ndarray:
+ def indices(self) -> np.ndarray:
"""
Get the non-grid indices (i,j,k) of this locator.
@@ -253,7 +253,7 @@ def indices(self) -> numpy.ndarray:
2. It can be written/read from the database.
"""
- return numpy.array(self[:3])
+ return np.array(self[:3])
def getCompleteIndices(self) -> IJKType:
"""
@@ -339,8 +339,8 @@ def distanceTo(self, other: "IndexLocation") -> float:
return math.sqrt(
(
(
- numpy.array(self.getGlobalCoordinates())
- - numpy.array(other.getGlobalCoordinates())
+ np.array(self.getGlobalCoordinates())
+ - np.array(other.getGlobalCoordinates())
)
** 2
).sum()
@@ -429,7 +429,7 @@ def pop(self, location: IndexLocation):
self._locations.pop(location)
@property
- def indices(self) -> List[numpy.ndarray]:
+ def indices(self) -> List[np.ndarray]:
"""
Return indices for all locations.
diff --git a/armi/reactor/grids/structuredGrid.py b/armi/reactor/grids/structuredGrid.py
index 81d74add4..437bb5b66 100644
--- a/armi/reactor/grids/structuredGrid.py
+++ b/armi/reactor/grids/structuredGrid.py
@@ -15,7 +15,7 @@
import itertools
from typing import Tuple, Union, List, Iterable, Optional, Sequence
-import numpy
+import numpy as np
from armi.reactor.grids.locations import (
IJKType,
@@ -170,8 +170,8 @@ def __init__(
# only represent unit steps in dimensions they're being used so as to not
# pollute the dot product. This may reduce the length of this from 3 to 2 or 1
- self._unitSteps = numpy.array(unitSteps)[self._stepDims]
- self._offset = numpy.zeros(3) if offset is None else numpy.array(offset)
+ self._unitSteps = np.array(unitSteps)[self._stepDims]
+ self._offset = np.zeros(3) if offset is None else np.array(offset)
self._locations = {}
self._buildLocations() # locations are owned by a grid, so the grid builds them.
@@ -219,12 +219,12 @@ def reduce(self) -> GridParameters:
)
@property
- def offset(self) -> numpy.ndarray:
+ def offset(self) -> np.ndarray:
"""Offset in cm for each axis."""
return self._offset
@offset.setter
- def offset(self, offset: numpy.ndarray):
+ def offset(self, offset: np.ndarray):
self._offset = offset
def __repr__(self) -> str:
@@ -289,7 +289,7 @@ def backUp(self):
def restoreBackup(self):
self._unitSteps, self._bounds, self._offset = self._backup
- def getCoordinates(self, indices, nativeCoords=False) -> numpy.ndarray:
+ def getCoordinates(self, indices, nativeCoords=False) -> np.ndarray:
"""Return the coordinates of the center of the mesh cell at the given indices
in cm.
@@ -308,26 +308,26 @@ def getCoordinates(self, indices, nativeCoords=False) -> numpy.ndarray:
all axes. There are no more complicated situations where we need to find
        the centroid of an octagon on a rectangular mesh, or the like.
"""
- indices = numpy.array(indices)
+ indices = np.array(indices)
return self._evaluateMesh(
indices, self._centroidBySteps, self._centroidByBounds
)
- def getCellBase(self, indices) -> numpy.ndarray:
+ def getCellBase(self, indices) -> np.ndarray:
"""Get the mesh base (lower left) of this mesh cell in cm."""
- indices = numpy.array(indices)
+ indices = np.array(indices)
return self._evaluateMesh(
indices, self._meshBaseBySteps, self._meshBaseByBounds
)
- def getCellTop(self, indices) -> numpy.ndarray:
+ def getCellTop(self, indices) -> np.ndarray:
"""Get the mesh top (upper right) of this mesh cell in cm."""
- indices = numpy.array(indices) + 1
+ indices = np.array(indices) + 1
return self._evaluateMesh(
indices, self._meshBaseBySteps, self._meshBaseByBounds
)
- def _evaluateMesh(self, indices, stepOperator, boundsOperator) -> numpy.ndarray:
+ def _evaluateMesh(self, indices, stepOperator, boundsOperator) -> np.ndarray:
"""
Evaluate some function of indices on this grid.
@@ -346,17 +346,17 @@ def _evaluateMesh(self, indices, stepOperator, boundsOperator) -> numpy.ndarray:
boundCoords.append(boundsOperator(indices[ii], bounds))
# limit step operator to the step dimensions
- stepCoords = stepOperator(numpy.array(indices)[self._stepDims])
+ stepCoords = stepOperator(np.array(indices)[self._stepDims])
# now mix/match bounds coords with step coords appropriately.
- result = numpy.zeros(len(indices))
+ result = np.zeros(len(indices))
result[self._stepDims] = stepCoords
result[self._boundDims] = boundCoords
return result + self._offset
def _centroidBySteps(self, indices):
- return numpy.dot(self._unitSteps, indices)
+ return np.dot(self._unitSteps, indices)
def _meshBaseBySteps(self, indices):
return (
@@ -515,9 +515,9 @@ def pitch(self) -> Union[float, Tuple[float, float]]:
def _tuplify(maybeArray) -> tuple:
- if isinstance(maybeArray, (numpy.ndarray, list, tuple)):
+ if isinstance(maybeArray, (np.ndarray, list, tuple)):
maybeArray = tuple(
- tuple(row) if isinstance(row, (numpy.ndarray, list)) else row
+ tuple(row) if isinstance(row, (np.ndarray, list)) else row
for row in maybeArray
)
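
``_tuplify`` normalizes array-like unit-step data into nested tuples, presumably so the grid parameters can be compared or hashed safely. A sketch of the behavior under that assumption::

    import numpy as np

    def tuplify(maybeArray) -> tuple:
        # Rows that are arrays or lists become tuples; scalar rows pass through.
        if isinstance(maybeArray, (np.ndarray, list, tuple)):
            maybeArray = tuple(
                tuple(row) if isinstance(row, (np.ndarray, list)) else row
                for row in maybeArray
            )
        return maybeArray

    assert tuplify(np.eye(2)) == ((1.0, 0.0), (0.0, 1.0))
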
diff --git a/armi/reactor/grids/tests/test_grids.py b/armi/reactor/grids/tests/test_grids.py
index 99b5934a1..26b5a334b 100644
--- a/armi/reactor/grids/tests/test_grids.py
+++ b/armi/reactor/grids/tests/test_grids.py
@@ -18,7 +18,7 @@
import unittest
import pickle
-import numpy
+import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from armi.reactor import geometry
@@ -108,7 +108,7 @@ def test_recursion(self):
pinIndexLoc = grids.IndexLocation(1, 5, 0, blockGrid)
pinFree = grids.CoordinateLocation(1.0, 2.0, 3.0, blockGrid)
- assert_allclose(blockLoc.getCompleteIndices(), numpy.array((2, 3, 3)))
+ assert_allclose(blockLoc.getCompleteIndices(), np.array((2, 3, 3)))
assert_allclose(blockLoc.getGlobalCoordinates(), (2.0, 3.0, 3.5))
assert_allclose(blockLoc.getGlobalCellBase(), (1.5, 2.5, 3))
assert_allclose(blockLoc.getGlobalCellTop(), (2.5, 3.5, 4))
@@ -457,7 +457,7 @@ def test_adjustPitchFlatsUp(self):
grid = grids.HexGrid(
unitSteps=((1.5 / math.sqrt(3), 0.0, 0.0), (0.5, 1, 0.0), (0, 0, 0)),
unitStepLimits=((-3, 3), (-3, 3), (0, 1)),
- offset=numpy.array([offset, offset, offset]),
+ offset=np.array([offset, offset, offset]),
)
# test number of rings before converting pitch
@@ -499,7 +499,7 @@ def test_adjustPitchCornersUp(self):
(0, 0, 0),
),
unitStepLimits=((-3, 3), (-3, 3), (0, 1)),
- offset=numpy.array(offsets),
+ offset=np.array(offsets),
)
# test number of rings before converting pitch
@@ -510,7 +510,7 @@ def test_adjustPitchCornersUp(self):
grid.changePitch(2.0)
self.assertAlmostEqual(grid.pitch, 2.0, delta=1e-9)
v2 = grid.getCoordinates((1, 0, 0))
- correction = numpy.array([0.5, math.sqrt(3) / 2, 0])
+ correction = np.array([0.5, math.sqrt(3) / 2, 0])
assert_allclose(v1 + correction, v2)
# basic sanity: test number of rings has not changed
@@ -625,7 +625,7 @@ class TestThetaRZGrid(unittest.TestCase):
def test_positions(self):
grid = grids.ThetaRZGrid(
- bounds=(numpy.linspace(0, 2 * math.pi, 13), [0, 2, 2.5, 3], [0, 10, 20, 30])
+ bounds=(np.linspace(0, 2 * math.pi, 13), [0, 2, 2.5, 3], [0, 10, 20, 30])
)
assert_allclose(
grid.getCoordinates((1, 0, 1)), (math.sqrt(2) / 2, math.sqrt(2) / 2, 15.0)
diff --git a/armi/reactor/grids/thetarz.py b/armi/reactor/grids/thetarz.py
index 27148ae98..7f9582d4c 100644
--- a/armi/reactor/grids/thetarz.py
+++ b/armi/reactor/grids/thetarz.py
@@ -14,7 +14,7 @@
import math
from typing import TYPE_CHECKING, Optional, NoReturn
-import numpy
+import numpy as np
from armi.reactor.grids.locations import IJType, IJKType
from armi.reactor.grids.structuredGrid import StructuredGrid
@@ -75,8 +75,8 @@ def fromGeom(cls, geom, armiObject: Optional["ArmiObject"] = None) -> "ThetaRZGr
radii.add(rad2)
thetas.add(theta1)
thetas.add(theta2)
- radii = numpy.array(sorted(radii), dtype=numpy.float64)
- thetaRadians = numpy.array(sorted(thetas), dtype=numpy.float64)
+ radii = np.array(sorted(radii), dtype=np.float64)
+ thetaRadians = np.array(sorted(thetas), dtype=np.float64)
return ThetaRZGrid(
bounds=(thetaRadians, radii, (0.0, 0.0)), armiObject=armiObject
@@ -89,7 +89,7 @@ def getRingPos(self, indices):
def getIndicesFromRingAndPos(ring: int, pos: int) -> IJType:
return (pos - 1, ring - 1)
- def getCoordinates(self, indices, nativeCoords=False) -> numpy.ndarray:
+ def getCoordinates(self, indices, nativeCoords=False) -> np.ndarray:
meshCoords = theta, r, z = super().getCoordinates(
indices, nativeCoords=nativeCoords
)
@@ -100,7 +100,7 @@ def getCoordinates(self, indices, nativeCoords=False) -> numpy.ndarray:
return meshCoords
else:
# return x, y ,z
- return numpy.array((r * math.cos(theta), r * math.sin(theta), z))
+ return np.array((r * math.cos(theta), r * math.sin(theta), z))
def indicesOfBounds(
self,
@@ -131,8 +131,8 @@ def indicesOfBounds(
-------
tuple : i, j, k of given bounds
"""
- i = int(numpy.abs(self._bounds[0] - theta0).argmin())
- j = int(numpy.abs(self._bounds[1] - rad0).argmin())
+ i = int(np.abs(self._bounds[0] - theta0).argmin())
+ j = int(np.abs(self._bounds[1] - rad0).argmin())
return (i, j, 0)
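
For ``ThetaRZGrid``, native coordinates are ``(theta, r, z)``; with ``nativeCoords=False`` they are converted to Cartesian via the usual polar identity. A small numeric check::

    import math

    import numpy as np

    theta, r, z = math.pi / 4, 2.0, 15.0  # assumed native mesh-cell center
    xyz = np.array((r * math.cos(theta), r * math.sin(theta), z))
    # xyz is approximately (1.41421, 1.41421, 15.0)
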
diff --git a/armi/reactor/parameters/__init__.py b/armi/reactor/parameters/__init__.py
index d03773a20..16bf2813b 100644
--- a/armi/reactor/parameters/__init__.py
+++ b/armi/reactor/parameters/__init__.py
@@ -71,6 +71,17 @@
>>> b.p.fuelTemp = numpy.array(range(217), dtype=float)
>>> b.p.fuelTemp[58] = 600
+The parameter attributes can be accessed via the ``paramDefs`` property. Perhaps a user is
+curious about the units of a block parameter:
+
+ >>> defs = b.p.paramDefs
+ >>> defs["heightBOL"]
+
+
+ # Or, more simply:
+ >>> defs["heightBOL"].units
+ 'cm'
+
.. note::
There have been many discussions on what the specific name of this module/system
diff --git a/armi/reactor/parameters/parameterCollections.py b/armi/reactor/parameters/parameterCollections.py
index 0163a416e..44cc06d30 100644
--- a/armi/reactor/parameters/parameterCollections.py
+++ b/armi/reactor/parameters/parameterCollections.py
@@ -14,10 +14,10 @@
import copy
import pickle
-from typing import Any, Optional, List, Set
+from typing import Any, Optional, List, Set, Iterator, Callable
import sys
-import numpy
+import numpy as np
import six
from armi import runLog
@@ -35,13 +35,13 @@
"""
The serial number for all ParameterCollections
-This is a counter of the number of instances of all types. They are useful for tracking
-items through the history of a database.
+This is a counter of the number of instances of all types. Serial numbers are useful for tracking
+items through the history of a database.
.. warning::
- This is not MPI safe. We also have not done anything to make it thread safe,
- except that the GIL exists.
+ This is not MPI safe. We also have not done anything to make it thread safe,
+ except that the GIL exists.
"""
@@ -70,8 +70,8 @@ class _ParameterCollectionType(type):
"""
Simple metaclass to make sure that expected class attributes are present.
- These attributes shouldn't be shared among different subclasses, so this
- makes sure that each subclass gets its own.
+ These attributes shouldn't be shared among different subclasses, so this makes sure that each
+ subclass gets its own.
"""
def __new__(mcl, name, bases, attrs):
@@ -85,15 +85,13 @@ def __new__(mcl, name, bases, attrs):
class ParameterCollection(metaclass=_ParameterCollectionType):
"""An empty class for holding state information in the ARMI data structure.
- A parameter collection stores one or more formally-defined values ("parameters").
- Until a given ParameterCollection subclass has been instantiated, new parameters may
- be added to its parameter definitions (e.g., from plugins). Upon first
- instantiation, ``applyParameters()`` will be called, binding the parameter
- definitions to the Collection class as descriptors.
+ A parameter collection stores one or more formally-defined values ("parameters"). Until a given
+ ParameterCollection subclass has been instantiated, new parameters may be added to its parameter
+ definitions (e.g., from plugins). Upon first instantiation, ``applyParameters()`` will be
+ called, binding the parameter definitions to the Collection class as descriptors.
- It is illegal to redefine a parameter with the same name in the same class, or its
- subclasses, and attempting to do so should result in exceptions in
- ``applyParameters()``.
+ It is illegal to redefine a parameter with the same name in the same class, or its subclasses,
+ and attempting to do so should result in exceptions in ``applyParameters()``.
Attributes
----------
@@ -104,13 +102,12 @@ class ParameterCollection(metaclass=_ParameterCollectionType):
Keys are ``(paramName, timeStep)``.
assigned : int Flag
- indicates the synchronization state of the parameter collection. This is used to
- reduce the amount of information that is transmitted during database, and MPI
- operations as well as determine the collection's state when exiting a
- ``Composite.retainState``.
+ indicates the synchronization state of the parameter collection. This is used to reduce the
+ amount of information that is transmitted during database, and MPI operations as well as
+ determine the collection's state when exiting a ``Composite.retainState``.
- This attribute when used with the ``Parameter.assigned`` attribute allows us to
- efficiently perform many operations.
+ This attribute when used with the ``Parameter.assigned`` attribute allows us to efficiently
+ perform many operations.
See Also
--------
@@ -122,12 +119,20 @@ class ParameterCollection(metaclass=_ParameterCollectionType):
)
_allFields: List[str] = []
- # The ArmiObject class that this ParameterCollection belongs to
_ArmiObject = None
+ """The ArmiObject class that this ParameterCollection belongs to.
- # A set of all instance attributes that are settable on an instance. This prevents
- # inadvertent setting of values that aren't proper parameters. Named _slots, as
- # it is used to emulate some of the behaviors of __slots__.
+ Crucially **not** the instance that owns this collection. For any
+ ``ArmiObject``, the following are true::
+
+ >>> self.p._ArmiObject is not self
+ >>> isinstance(self, self.p._ArmiObject)
+
+ """
+
+ # A set of all instance attributes that are settable on an instance. This prevents inadvertent
+ # setting of values that aren't proper parameters. Named _slots, as it is used to emulate some
+ # of the behaviors of __slots__.
_slots: Set[str] = set()
def __init__(self, _state: Optional[List[Any]] = None):
@@ -362,7 +367,7 @@ def __contains__(self, name):
else:
return name in self._hist
- def __eq__(self, other):
+ def __eq__(self, other: "ParameterCollection"):
if not isinstance(other, self.__class__):
return False
@@ -377,7 +382,8 @@ def __eq__(self, other):
return True
- def __iter__(self):
+ def __iter__(self) -> Iterator[str]:
+ """Iterate over names of assigned parameters define on this collection."""
return (
pd.name
for pd in self.paramDefs
@@ -425,7 +431,7 @@ def paramDefs(self) -> parameterDefinitions.ParameterDefinitionCollection:
Get the :py:class:`ParameterDefinitionCollection` associated with this instance.
This serves as both an alias for the pDefs class attribute, and as a read-only
- accessor for them. Most non-paramter-system related interactions with an
+ accessor for them. Most non-parameter-system related interactions with an
object's ``ParameterCollection`` should go through this. In the future, it
probably makes sense to make the ``pDefs`` that the ``applyDefinitions`` and
``ResolveParametersMeta`` things are sensitive to more hidden from outside the
@@ -484,8 +490,8 @@ def restoreBackup(self, paramsToApply):
for pd, currentValue in currentData.items():
# correct for global paramDef.assigned assumption
retainedValue = getattr(self, pd.fieldName)
- if isinstance(retainedValue, numpy.ndarray) or isinstance(
- currentValue, numpy.ndarray
+ if isinstance(retainedValue, np.ndarray) or isinstance(
+ currentValue, np.ndarray
):
if (retainedValue != currentValue).any():
setattr(self, pd.fieldName, currentValue)
@@ -496,6 +502,32 @@ def restoreBackup(self, paramsToApply):
pd.assigned = SINCE_ANYTHING
self.assigned = SINCE_ANYTHING
+ def where(
+ self, f: Callable[[parameterDefinitions.Parameter], bool]
+ ) -> Iterator[parameterDefinitions.Parameter]:
+ """Produce an iterator over parameters that meet some criteria.
+
+ Parameters
+ ----------
+ f : callable function f(parameter) -> bool
+ Function to check if a parameter should be fetched during the iteration.
+
+ Returns
+ -------
+ iterator of :class:`armi.reactor.parameters.Parameter`
+ Iterator, **not** list or tuple, that produces each parameter that
+ meets ``f(parameter) == True``.
+
+ Examples
+ --------
+ >>> block = r.core[0][0]
+ >>> for param in block.p.where(lambda pd: pd.atLocation(ParamLocation.EDGES)):
+ ... print(param.name, block.p[param.name])
+
+ """
+ return filter(f, self.paramDefs)
+
def collectPluginParameters(pm):
"""Apply parameters from plugins to their respective object classes."""
diff --git a/armi/reactor/parameters/parameterDefinitions.py b/armi/reactor/parameters/parameterDefinitions.py
index a1d6c3d88..7ad5ccec0 100644
--- a/armi/reactor/parameters/parameterDefinitions.py
+++ b/armi/reactor/parameters/parameterDefinitions.py
@@ -15,12 +15,11 @@
r"""
This module contains the code necessary to represent parameter definitions.
-``ParameterDefinition``\ s are the metadata that describe specific parameters, and aid in
-enforcing certain rules upon the parameters themselves and the parameter collections
-that contain them.
+``ParameterDefinition``\ s are the metadata that describe specific parameters, and aid in enforcing
+certain rules upon the parameters themselves and the parameter collections that contain them.
-This module also describes the ``ParameterDefinitionCollection`` class, which serves as
-a specialized container to manage related parameter definitions.
+This module also describes the ``ParameterDefinitionCollection`` class, which serves as a
+specialized container to manage related parameter definitions.
See Also
--------
@@ -31,9 +30,8 @@
import functools
import re
-import numpy
+import numpy as np
-from armi import runLog
from armi.reactor.flags import Flags
from armi.reactor.parameters.exceptions import ParameterError, ParameterDefinitionError
@@ -42,8 +40,7 @@
# Note that the various operations are responsible for clearing the flags on the events.
# These should be interpreted as:
# The Parameter or ParameterCollection has been modified SINCE_<EVENT>.
-# In order for that to happen, the flags need to be cleared when the <EVENT>
-# begins.
+# In order for that to happen, the flags need to be cleared when the <EVENT> begins.
SINCE_INITIALIZATION = 1
SINCE_LAST_DISTRIBUTE_STATE = 4
SINCE_LAST_GEOMETRY_TRANSFORMATION = 8
@@ -69,9 +66,12 @@ class Category:
* `fluxQuantities` parameters are related to neutron or gamma flux
* `neutronics` parameters are calculated in a neutronics global flux solve
* `gamma` parameters are calculated in a fixed-source gamma solve
- * `detailedAxialExpansion` parameters are marked as such so that they are mapped from the uniform mesh back to the non-uniform mesh
- * `reactivity coefficients` parameters are related to reactivity coefficient or kinetics parameters for kinetics solutions
- * `thermal hydraulics` parameters come from a thermal hydraulics physics plugin (e.g., flow rates, temperatures, etc.)
+ * `detailedAxialExpansion` parameters are marked as such so that they are mapped from the
+ uniform mesh back to the non-uniform mesh
+ * `reactivity coefficients` parameters are related to reactivity coefficient or kinetics
+ parameters for kinetics solutions
+ * `thermal hydraulics` parameters come from a thermal hydraulics physics plugin (e.g., flow
+ rates, temperatures, etc.)
"""
depletion = "depletion"
@@ -104,7 +104,9 @@ class ParamLocation(enum.Flag):
class NoDefault:
- """Class used to allow distinction between not setting a default and setting a default of ``None``."""
+ """Class used to allow distinction between not setting a default and setting a default of
+ ``None``.
+ """
def __init__(self):
raise NotImplementedError("You cannot create an instance of NoDefault")
@@ -121,13 +123,12 @@ class Serializer:
r"""
Abstract class describing serialize/deserialize operations for Parameter data.
- Parameters need to be stored to and read from database files. This currently
- requires that the Parameter data be converted to a numpy array of a datatype
- supported by the ``h5py`` package. Some parameters may contain data that are not
- trivially representable in numpy/HDF5, and need special treatment. Subclassing
- ``Serializer`` and setting it as a ``Parameter``\ s ``serializer`` allows for special
- operations to be performed on the parameter values as they are stored to the
- database or read back in.
+ Parameters need to be stored to and read from database files. This currently requires that the
+ Parameter data be converted to a numpy array of a datatype supported by the ``h5py`` package.
+ Some parameters may contain data that are not trivially representable in numpy/HDF5, and need
+ special treatment. Subclassing ``Serializer`` and setting it as a ``Parameter``\ s
+ ``serializer`` allows for special operations to be performed on the parameter values as they are
+ stored to the database or read back in.
The ``Database3`` already knows how to handle certain cases where the data are not
straightforward to get into a numpy array, such as when:
@@ -137,35 +138,33 @@ class Serializer:
- The dimensions of the values stored on each object are inconsistent (e.g.,
"jagged" arrays)
- So, in these cases, a Serializer is not needed. Serializers are necessary for when
- the actual data need to be converted to a native data type (e.g., int, float, etc.)
- For example, we use a Serializer to handle writing ``Flags`` to the Database, as
- they tend to be too big to fit into a system-native integer.
+ So, in these cases, a Serializer is not needed. Serializers are necessary for when the actual
+ data need to be converted to a native data type (e.g., int, float, etc). For example, we use a
+ Serializer to handle writing ``Flags`` to the Database, as they tend to be too big to fit into a
+ system-native integer.
.. important::
- Defining a Serializer for a Parameter in part defines the underlying
- representation of the data within a database file; the data stored in a database
- are sensitive to the code that wrote them. Changing the method that a Serializer
- uses to pack or unpack data may break compatibility with old database files.
- Therefore, Serializers should be diligent about signalling changes by updating
- their version. It is also good practice, whenever possible, to support reading
- old versions so that database files written by old versions can still be read.
+ Defining a Serializer for a Parameter in part defines the underlying representation of the
+ data within a database file; the data stored in a database are sensitive to the code that
+ wrote them. Changing the method that a Serializer uses to pack or unpack data may break
+ compatibility with old database files. Therefore, Serializers should be diligent about
+ signaling changes by updating their version. It is also good practice, whenever possible,
+ to support reading old versions so that database files written by old versions can still be
+ read.
.. impl:: Users can define custom parameter serializers.
:id: I_ARMI_PARAM_SERIALIZE
:implements: R_ARMI_PARAM_SERIALIZE
- Important physical parameters are stored in every ARMI object.
- These parameters represent the plant's state during execution
- of the model. Currently, this requires that the parameters be serializable to a
- numpy array of a datatype supported by the ``h5py`` package so that the data can
- be written to, and subsequently read from, an HDF5 file.
+ Important physical parameters are stored in every ARMI object. These parameters represent
+ the plant's state during execution of the model. Currently, this requires that the
+ parameters be serializable to a numpy array of a datatype supported by the ``h5py`` package
+ so that the data can be written to, and subsequently read from, an HDF5 file.
- This class allows for these parameters to be serialized in a custom manner by
- providing interfaces for packing and unpacking parameter data. The user or
- downstream plugin is able to specify how data is serialized if that data is not
- naturally serializable.
+ This class allows for these parameters to be serialized in a custom manner by providing
+ interfaces for packing and unpacking parameter data. The user or downstream plugin is able
+ to specify how data is serialized if that data is not naturally serializable.
See Also
--------
@@ -174,20 +173,19 @@ class Serializer:
armi.reactor.flags.FlagSerializer
"""
- # This will accompany the packed data as an attribute when written, and will be
- # provided to the unpack() method when reading. If the underlying format of the data
- # changes, make sure to change this.
+ # This will accompany the packed data as an attribute when written, and will be provided to the
+ # unpack() method when reading. If the underlying format of the data changes, make sure to
+ # change this.
version: Optional[str] = None
@staticmethod
- def pack(data: Sequence[any]) -> Tuple[numpy.ndarray, Dict[str, any]]:
+ def pack(data: Sequence[any]) -> Tuple[np.ndarray, Dict[str, any]]:
"""
- Given unpacked data, return packed data and a dictionary of attributes needed to
- unpack it.
+ Given unpacked data, return packed data and a dictionary of attributes needed to unpack it.
- This should perform the fundamental packing operation, returning the packed data
- and any metadata ("attributes") that would be necessary to unpack the data. The
- class's version is always stored, so no need to provide it as an attribute.
+ This should perform the fundamental packing operation, returning the packed data and any
+ metadata ("attributes") that would be necessary to unpack the data. The class's version is
+ always stored, so no need to provide it as an attribute.
See Also
--------
@@ -197,7 +195,7 @@ def pack(data: Sequence[any]) -> Tuple[numpy.ndarray, Dict[str, any]]:
@classmethod
def unpack(
- cls, data: numpy.ndarray, version: Any, attrs: Dict[str, any]
+ cls, data: np.ndarray, version: Any, attrs: Dict[str, any]
) -> Sequence[any]:
"""Given packed data and attributes, return the unpacked data."""
raise NotImplementedError()
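
A hedged sketch of a custom ``Serializer`` subclass, assuming a parameter whose values are sets of ints (jagged data, so not directly representable in HDF5); ``SetSerializer`` and its packed layout are illustrative, not part of ARMI::

    import numpy as np

    from armi.reactor.parameters.parameterDefinitions import Serializer

    class SetSerializer(Serializer):
        """Pack/unpack parameter values that are sets of ints."""

        version = "1"  # bump if the packed layout ever changes

        @staticmethod
        def pack(data):
            # Flatten every set into one array; record each length so the
            # jagged data can be split back apart on read.
            lengths = [len(d) for d in data]
            flat = np.array([v for d in data for v in sorted(d)])
            return flat, {"lengths": lengths}

        @classmethod
        def unpack(cls, data, version, attrs):
            out, start = [], 0
            for n in attrs["lengths"]:
                out.append(set(data[start : start + n].tolist()))
                start += n
            return out
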
@@ -218,10 +216,10 @@ def isNumpyArray(paramStr):
"""
def setParameter(selfObj, value):
- if value is None or isinstance(value, numpy.ndarray):
+ if value is None or isinstance(value, np.ndarray):
setattr(selfObj, "_p_" + paramStr, value)
else:
- setattr(selfObj, "_p_" + paramStr, numpy.array(value))
+ setattr(selfObj, "_p_" + paramStr, np.array(value))
return setParameter
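
``isNumpyArray`` builds the ``setter`` used for array-valued parameters (see ``detailedNucKeys`` in ``reactorParameters.py`` later in this diff): list or tuple input is coerced to an ndarray under the backing ``_p_<name>`` attribute. A quick sketch; ``flux`` is a hypothetical parameter name::

    import numpy as np

    from armi.reactor.parameters.parameterDefinitions import isNumpyArray

    class Demo:
        pass

    setter = isNumpyArray("flux")  # builds a setter targeting "_p_flux"
    obj = Demo()
    setter(obj, [1.0, 2.0, 3.0])   # plain list input is coerced...
    assert isinstance(obj._p_flux, np.ndarray)  # ...into an ndarray
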
@@ -232,11 +230,11 @@ class Parameter:
_validName = re.compile("^[a-zA-Z0-9_]+$")
- # Using slots because Parameters are pretty static and mostly POD. __slots__ make
- # this official, and offer some performance benefits in memory (not too important;
- # there aren't that many instances of Parameter to begin with) and attribute access
- # time (more important, since we need to go through Parameter objects to get to a
- # specific parameter's value in a ParameterCollection)
+ # Using slots because Parameters are pretty static and mostly POD. __slots__ make this official,
+ # and offer some performance benefits in memory (not too important; there aren't that many
+ # instances of Parameter to begin with) and attribute access time (more important, since we need
+ # to go through Parameter objects to get to a specific parameter's value in a
+ # ParameterCollection)
__slots__ = (
"name",
"fieldName",
@@ -266,16 +264,12 @@ def __init__(
categories,
serializer: Optional[Type[Serializer]] = None,
):
- assert self._validName.match(name), "{} is not a valid param name".format(name)
# nonsensical to have a serializer with no intention of saving to DB
assert not (serializer is not None and not saveToDB)
assert serializer is None or saveToDB
- # TODO: This warning is temporary. At some point, it will become an AssertionError.
- if not len(description):
- runLog.warning(
- f"DeprecationWarning: Parameter {name} defined without description.",
- single=True,
- )
+ assert self._validName.match(name), "{} is not a valid param name".format(name)
+ assert len(description), f"Parameter {name} defined without description."
+
self.collectionType = _Undefined
self.name = name
self.fieldName = "_p_" + name
@@ -300,8 +294,8 @@ def paramGetter(p_self):
value = getattr(p_self, self.fieldName)
if value is NoDefault:
raise ParameterError(
- "Cannot get value for parameter `{}` in `{}` as no default has "
- "been defined, and no value has been assigned.".format(
+ "Cannot get value for parameter `{}` in `{}` as no default has been "
+ "defined, and no value has been assigned.".format(
self.name, type(p_self)
)
)
@@ -342,9 +336,9 @@ def __get__(self, obj, cls=None):
Notes
-----
- We do not check to see if ``cls != None``. This is an optimization choice, that
- someone may deem unnecessary. As a result, unlike Python's ``property`` class, a
- subclass cannot override the getter method.
+ We do not check to see if ``cls != None``. This is an optimization choice, that someone may
+ deem unnecessary. As a result, unlike Python's ``property`` class, a subclass cannot
+ override the getter method.
"""
return self._getter(obj)
@@ -355,18 +349,17 @@ def setter(self, setter):
:id: I_ARMI_PARAM_PARALLEL
:implements: R_ARMI_PARAM_PARALLEL
- Parameters need to be handled properly during parallel code execution. This
- includes notifying processes if a parameter has been updated by
- another process. This method allows for setting a parameter's value as well
- as an attribute that signals whether this parameter has been updated. Future
- processes will be able to query this attribute so that the parameter's
- status is properly communicated.
+ Parameters need to be handled properly during parallel code execution. This includes
+ notifying processes if a parameter has been updated by another process. This method
+ allows for setting a parameter's value as well as an attribute that signals whether this
+ parameter has been updated. Future processes will be able to query this attribute so
+ that the parameter's status is properly communicated.
Notes
-----
- Unlike the traditional Python ``property`` class, this does not return a new
- instance of a ``Parameter``; therefore it cannot be reassigned in the same way
- that a Python ``property`` can be.
+ Unlike the traditional Python ``property`` class, this does not return a new instance of a
+ ``Parameter``; therefore it cannot be reassigned in the same way that a Python ``property``
+ can be.
Examples
--------
@@ -428,6 +421,10 @@ def atLocation(self, loc):
"""True if parameter is defined at location."""
return self.location and self.location & loc
+ def hasCategory(self, category: str) -> bool:
+ """True if a parameter has a specific category."""
+ return category in self.categories
+
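
``hasCategory`` pairs naturally with ``ParameterCollection.where`` from earlier in this diff. A usage sketch, assuming ``b`` is a Block from a loaded case and recalling that ``neutronics`` is one of the categories defined in ``Category``::

    for param in b.p.where(lambda pd: pd.hasCategory("neutronics")):
        print(param.name, param.units)
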
class ParameterDefinitionCollection:
"""
@@ -435,14 +432,13 @@ class ParameterDefinitionCollection:
Notes
-----
- ``_representedTypes`` is used to detect if this ``ParameterDefinitionCollection``
- contains definitions for only one type. If the collection only exists for 1 type,
- the lookup (``__getitem__``) can short circuit O(n) logic for O(1) dictionary
- lookup.
+ ``_representedTypes`` is used to detect if this ``ParameterDefinitionCollection`` contains
+ definitions for only one type. If the collection only exists for 1 type, the lookup
+ (``__getitem__``) can short circuit O(n) logic for O(1) dictionary lookup.
"""
- # Slots are not being used here as an attempt at optimization. Rather, they serve to
- # add some needed rigidity to the parameter system.
+ # Slots are not being used here as an attempt at optimization. Rather, they serve to add some
+ # needed rigidity to the parameter system.
__slots__ = ("_paramDefs", "_paramDefDict", "_representedTypes", "_locked")
def __init__(self):
@@ -462,15 +458,15 @@ def __getitem__(self, name):
Notes
-----
- This method might break if the collection is for multiple composite types, and
- there exists a parameter with the same name in multiple types.
+ This method might break if the collection is for multiple composite types, and there exists
+ a parameter with the same name in multiple types.
"""
# O(1) lookup if there is only 1 type, could still raise a KeyError
if len(self._representedTypes) == 1:
return self._paramDefDict[name, next(iter(self._representedTypes))]
- # "matches" only checks for the same name, while the add method checks both name
- # and collectionType
+ # "matches" only checks for the same name, while the add method checks both name and
+ # collectionType
matches = [pd for pd in self if pd.name == name]
if len(matches) != 1:
raise KeyError(
@@ -507,8 +503,8 @@ def extend(self, other):
assert self is not other
if other is None:
raise ValueError(
- f"Cannot extend {self} with `None`. "
- "Ensure return value of parameter definitions returns something."
+ f"Cannot extend {self} with `None`. Ensure return value of parameter definitions "
+ "returns something."
)
for pd in other:
self.add(pd)
@@ -524,8 +520,8 @@ def atLocation(self, paramLoc):
"""
Make a param definition collection with all defs defined at a specific location.
- Parameters can be defined at various locations within their container
- based on :py:class:`ParamLocation`. This allows selection by those values.
+ Parameters can be defined at various locations within their container based on
+ :py:class:`ParamLocation`. This allows selection by those values.
"""
return self._filter(lambda pd: pd.atLocation(paramLoc))
@@ -605,15 +601,15 @@ def toWriteToDB(self, assignedMask: Optional[int] = None):
:id: I_ARMI_PARAM_DB
:implements: R_ARMI_PARAM_DB
- This method is called when writing the parameters to the database file. It
- queries the parameter's ``saveToDB`` attribute to ensure that this parameter
- is desired for saving to the database file. It returns a list of parameters
- that should be included in the database write operation.
+ This method is called when writing the parameters to the database file. It queries the
+ parameter's ``saveToDB`` attribute to ensure that this parameter is desired for saving
+ to the database file. It returns a list of parameters that should be included in the
+ database write operation.
Parameters
----------
assignedMask : int
- a bitmask to down-filter which params to use based on how "stale" they are.
+ A bitmask to down-filter which params to use based on how "stale" they are.
"""
mask = assignedMask or SINCE_ANYTHING
return [p for p in self if p.saveToDB and p.assigned & mask]
@@ -623,8 +619,8 @@ def createBuilder(self, *args, **kwargs):
Create an associated object that can create definitions into this collection.
Using the returned ParameterBuilder will add all defined parameters to this
- ParameterDefinitionCollection, using the passed arguments as defaults. Arguments
- should be valid arguments to ``ParameterBuilder.__init__()``
+ ParameterDefinitionCollection, using the passed arguments as defaults. Arguments should be
+ valid arguments to ``ParameterBuilder.__init__()``
"""
paramBuilder = ParameterBuilder(*args, **kwargs)
paramBuilder.associateParameterDefinitionCollection(self)
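
The builder is used as a context manager, as ``reactorParameters.py`` later in this diff shows. A minimal sketch; ``demoParam`` is a hypothetical name for illustration only::

    from armi.reactor import parameters
    from armi.reactor.parameters import ParamLocation
    from armi.utils import units

    pDefs = parameters.ParameterDefinitionCollection()
    with pDefs.createBuilder(location=ParamLocation.AVERAGE, default=0.0) as pb:
        pb.defParam(
            "demoParam",
            units=units.UNITLESS,
            description="Hypothetical parameter showing the builder pattern.",
        )
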
@@ -675,9 +671,9 @@ def associateParameterDefinitionCollection(self, paramDefs):
"""
Associate this parameter factory with a specific ParameterDefinitionCollection.
- Subsequent calls to defParam will automatically add the created
- ParameterDefinitions to this ParameterDefinitionCollection. This results in a
- cleaner syntax when defining many ParameterDefinitions.
+ Subsequent calls to defParam will automatically add the created ParameterDefinitions to this
+ ParameterDefinitionCollection. This results in a cleaner syntax when defining many
+ ParameterDefinitions.
"""
self._paramDefs = paramDefs
@@ -737,10 +733,9 @@ def defParam(
Notes
-----
- It is not possible to initialize the parameter on the class this method would be
- used on, because there is no instance (i.e. self) when this method is run.
- However, this method could access a globally available set of definitions, if
- one existed.
+ It is not possible to initialize the parameter on the class this method would be used on,
+ because there is no instance (i.e. self) when this method is run. However, this method could
+ access a globally available set of definitions, if one existed.
"""
self._assertDefaultIsProperType(default)
if location is None and self._defaultLocation is None:
@@ -766,7 +761,6 @@ def defParam(
return paramDef
-# Container for all parameter definition collections that have been bound to an
-# ArmiObject or subclass. These are added from the applyParameters() method on
-# the ParameterCollection class.
+# Container for all parameter definition collections that have been bound to an ArmiObject or
+# subclass. These are added from the applyParameters() method on the ParameterCollection class.
ALL_DEFINITIONS = ParameterDefinitionCollection()
diff --git a/armi/reactor/reactorParameters.py b/armi/reactor/reactorParameters.py
index cc9fe3454..9b0f0be83 100644
--- a/armi/reactor/reactorParameters.py
+++ b/armi/reactor/reactorParameters.py
@@ -22,19 +22,6 @@
def defineReactorParameters():
pDefs = parameters.ParameterDefinitionCollection()
- pDefs.add(
- parameters.Parameter(
- "rdIterNum",
- units=units.UNITLESS,
- description="Integer number of region-density equilibrium iterations",
- location=ParamLocation.AVERAGE,
- saveToDB=True,
- default=parameters.NoDefault,
- setter=parameters.NoDefault,
- categories=set(),
- )
- )
-
with pDefs.createBuilder(location=ParamLocation.AVERAGE, default=0.0) as pb:
pb.defParam(
"cycle",
@@ -122,31 +109,29 @@ def defineCoreParameters():
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
-
pb.defParam(
"detailedNucKeys",
setter=isNumpyArray("detailedNucKeys"),
units=units.UNITLESS,
- description="""Nuclide vector keys, used to map densities in b.p.detailedNDens and a.p.detailedNDens.
- ZZZAAA (ZZZ atomic number, AAA mass number, + 100 * m for metastable states.""",
+ description="""Nuclide vector keys, used to map densities in b.p.detailedNDens and
+ a.p.detailedNDens.ZZZAAA (ZZZ atomic number, AAA mass number, + 100 * m for metastable
+ states.""",
saveToDB=True,
default=None,
)
with pDefs.createBuilder(location=ParamLocation.CENTROID) as pb:
-
pb.defParam(
"orientation",
units=units.DEGREES,
description=(
- "Triple representing rotations counterclockwise around each spatial axis. For example, "
- "a hex assembly rotated by 1/6th has orientation (0,0,60.0)"
+ "Triple representing rotations counterclockwise around each spatial axis. For "
+ "example, a hex assembly rotated by 1/6th has orientation (0,0,60.0)"
),
default=None,
)
with pDefs.createBuilder(location=ParamLocation.AVERAGE, default=0.0) as pb:
-
pb.defParam(
"maxAssemNum",
units=units.UNITLESS,
@@ -157,7 +142,6 @@ def defineCoreParameters():
pb.defParam("numMoves", units=units.UNITLESS, description="numMoves", default=0)
with pDefs.createBuilder(location="N/A", categories=["control rods"]) as pb:
-
pb.defParam(
"crMostValuablePrimaryRodLocation",
default="",
@@ -181,14 +165,20 @@ def defineCoreParameters():
default=0.0,
units=units.PCM,
saveToDB=True,
- description="Worth requirement for the primary control rods in the reactor core to achieve safe shutdown.",
+ description=(
+ "Worth requirement for the primary control rods in the reactor core to "
+ "achieve safe shutdown."
+ ),
)
pb.defParam(
"crWorthRequiredSecondary",
default=0.0,
units=units.PCM,
saveToDB=True,
- description="Worth requirement for the secondary control rods in the reactor core to achieve safe shutdown.",
+ description=(
+ "Worth requirement for the secondary control rods in the reactor core to "
+ "achieve safe shutdown."
+ ),
)
pb.defParam(
"crTransientOverpowerWorth",
@@ -196,13 +186,12 @@ def defineCoreParameters():
units=units.PCM,
saveToDB=True,
description=(
- "Reactivity worth introduced by removal of the highest worth primary "
- "control rod from the core, starting from its critical position"
+ "Reactivity worth introduced by removal of the highest worth primary control rod "
+ "from the core, starting from its critical position"
),
)
with pDefs.createBuilder() as pb:
-
pb.defParam(
"axialMesh",
units=units.CM,
@@ -212,11 +201,13 @@ def defineCoreParameters():
)
with pDefs.createBuilder(default=0.0, location="N/A") as pb:
-
pb.defParam(
"referenceBlockAxialMesh",
units=units.CM,
- description="The axial block boundaries that assemblies should conform to in a uniform mesh case.",
+ description=(
+ "The axial block boundaries that assemblies should conform to in a "
+ "uniform mesh case."
+ ),
default=None,
)
@@ -229,8 +220,8 @@ def defineCoreParameters():
pb.defParam(
"doublingTime",
units=units.YEARS,
- description="""The time it takes to produce enough spent fuel to fuel a daughter reactor,
- in effective number of years at full power.""",
+ description="""The time it takes to produce enough spent fuel to fuel a daughter
+ reactor, in effective number of years at full power.""",
)
pb.defParam(
@@ -272,8 +263,9 @@ def defineCoreParameters():
pb.defParam(
"maxcladFCCI",
units=units.MICRONS,
- description="The core wide maximum amount of cladding wastage due to fuel chemical clad interaction calculated "
- + "at the 0-sigma TH HCF temperatures and using the conservative FCCI model",
+ description="The core wide maximum amount of cladding wastage due to fuel chemical "
+ + "clad interaction calculated at the 0-sigma TH HCF temperatures and using the "
+ + "conservative FCCI model",
default=0.0,
)
@@ -303,18 +295,6 @@ def defineCoreParameters():
description="Run time since the beginning of the calculation",
)
- pb.defParam(
- "outsideFuelRing",
- units=units.UNITLESS,
- description="The ring (integer) with the fraction of flux that best meets the target",
- )
-
- pb.defParam(
- "outsideFuelRingFluxFr",
- units=units.UNITLESS,
- description="Ratio of the flux in a ring to the total reactor fuel flux",
- )
-
pb.defParam(
"peakGridDpaAt60Years",
units=units.DPA,
@@ -340,7 +320,8 @@ def defineCoreParameters():
pb.defParam(
"THmaxDeltaPPump",
units=units.PASCALS,
- description="The maximum pumping pressure rise required to pump the given mass flow rate through the rod bundle",
+ description="The maximum pumping pressure rise required to pump the given mass flow "
+ + "rate through the rod bundle",
)
pb.defParam(
@@ -516,8 +497,8 @@ def defineCoreParameters():
"jumpRing",
units=units.UNITLESS,
description=(
- "Radial ring number where bred-up fuel assemblies shuffle jump from the low power to the "
- "high power region."
+ "Radial ring number where bred-up fuel assemblies shuffle jump from the low power "
+ "to the high power region."
),
)
diff --git a/armi/reactor/reactors.py b/armi/reactor/reactors.py
index 7c10e204e..f18f048d4 100644
--- a/armi/reactor/reactors.py
+++ b/armi/reactor/reactors.py
@@ -13,15 +13,12 @@
# limitations under the License.
"""
-Reactor objects represent the highest level in the hierarchy of
-structures that compose the system to be modeled. Core objects
-represent collections of assemblies.
-
-Core is a high-level object in the data model in ARMI. They
-contain assemblies which in turn contain more refinement in
-representing the physical reactor. The reactor is the owner of
-many of the plant-wide state variables such as keff, cycle,
-and node.
+Reactor objects represent the highest level in the hierarchy of structures that compose the system
+to be modeled. Core objects represent collections of assemblies.
+
+Core is a high-level object in the data model in ARMI. Cores contain assemblies, which in turn
+contain more refinement in representing the physical reactor. The reactor is the owner of many of
+the plant-wide state variables such as keff, cycle, and node.
"""
from typing import Optional
import collections
@@ -30,8 +27,7 @@
import os
import time
-import numpy
-import tabulate
+import numpy as np
from armi import getPluginManagerOrFail, materials, nuclearDataIO
from armi import runLog
@@ -61,14 +57,14 @@
)
from armi.utils import createFormattedStrWithDelimiter, units
from armi.utils import directoryChangers
+from armi.utils import tabulate
from armi.utils.iterables import Sequence
from armi.utils.mathematics import average1DWithinTolerance
class Reactor(composites.Composite):
"""
- Top level of the composite structure, potentially representing all
- components in a reactor.
+ Top level of the composite structure, potentially representing all components in a reactor.
This class contains the core and any ex-core structures that are to be represented in the ARMI
model. Historically, the ``Reactor`` contained only the core. To support better representation
@@ -102,7 +98,6 @@ def __init__(self, name, blueprints):
self.spatialLocator = None
self.p.maxAssemNum = 0
self.p.cycle = 0
- self.p.flags |= Flags.REACTOR
self.core = None
self.sfp = None
self.blueprints = blueprints
@@ -127,7 +122,7 @@ def __repr__(self):
def add(self, container):
composites.Composite.add(self, container)
- cores = self.getChildrenWithFlags(Flags.CORE)
+ cores = [c for c in self.getChildren(deep=True) if isinstance(c, Core)]
if cores:
if len(cores) != 1:
raise ValueError(
@@ -245,7 +240,7 @@ def factory(cs, bp, geom: Optional[SystemLayoutInput] = None) -> Reactor:
class Core(composites.Composite):
- """
+ r"""
Reactor structure made up of assemblies. Could be a Core, spent fuel pool, reactor head, etc.
This has the bulk of the data management operations.
@@ -255,10 +250,9 @@ class Core(composites.Composite):
:implements: R_ARMI_R_CORE
        A :py:class:`Core <armi.reactor.reactors.Core>` object is typically a child of a
- :py:class:`Reactor <armi.reactor.reactors.Reactor>` object. A Reactor can contain multiple
- objects of the Core type. The instance attribute name ``r.core`` is reserved for the object
- representating the active core. A reactor may also have a spent fuel pool instance
- attribute, ``r.sfp``, which is also of type :py:class:`core <armi.reactor.reactors.Core>`.
+ :py:class:`Reactor <armi.reactor.reactors.Reactor>` object. A Reactor should only contain
+ one object of the Core type. The instance attribute name ``r.core`` is reserved for the
+ object representing the active core.
Most of the operations to retrieve information from the ARMI reactor data model are mediated
through Core objects. For example,
@@ -270,7 +264,6 @@ class Core(composites.Composite):
params : dict
Core-level parameters are scalar values that have time dependence. Examples are keff,
maxPercentBu, etc.
-
assemblies : list
List of assembly objects that are currently in the core
"""
@@ -287,7 +280,6 @@ def __init__(self, name):
Name of the object. Flags will inherit from this.
"""
composites.Composite.__init__(self, name)
- self.p.flags = Flags.fromStringIgnoreErrors(name)
self.assembliesByName = {}
self.circularRingList = {}
self.blocksByName = {} # lookup tables
@@ -301,7 +293,7 @@ def __init__(self, name):
self.timeOfStart = time.time()
self.zones = zones.Zones() # initialize with empty Zones object
# initialize the list that holds all shuffles
- self.moveList = {}
+ self.moves = {}
self.scalarVals = {}
self._nuclideCategories = {}
self.typeList = [] # list of block types to convert name - to -number.
@@ -502,7 +494,7 @@ def summarizeReactorStats(self):
("Fissile Mass (kg)", fissileMass),
("Heavy Metal Mass (kg)", heavyMetalMass),
],
- tablefmt="armi",
+ tableFmt="armi",
)
)
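
The ``tablefmt`` to ``tableFmt`` rename follows the switch from the third-party ``tabulate`` package to the vendored ``armi.utils.tabulate``, whose keyword arguments are camelCase. A hedged sketch with illustrative numbers::

    from armi.utils import tabulate

    print(
        tabulate.tabulate(
            [["Fissile Mass (kg)", 1234.5], ["Heavy Metal Mass (kg)", 5678.9]],
            headers=["Quantity", "Value"],
            tableFmt="armi",
        )
    )
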
@@ -886,14 +878,14 @@ def countBlocksWithFlags(self, blockTypeSpec, assemTypeSpec=None):
The types of blocks to be counted in a single assembly
assemTypeSpec : Flags or list of Flags
- The types of assemblies that are to be examine for the blockTypes
- of interest. None is every assembly
+ The types of assemblies that are to be examined for the blockTypes of interest. ``None``
+ means every assembly.
Returns
-------
maxBlocks : int
- The maximum number of blocks of the specified types in a single
- assembly in the entire core
+ The maximum number of blocks of the specified types in a single assembly in the entire
+ core.
"""
assems = self.getAssemblies(typeSpec=assemTypeSpec)
try:
@@ -1018,8 +1010,8 @@ def getAssembliesInSquareOrHexRing(
self, ring, typeSpec=None, exactType=False, exclusions=None
):
"""
- Returns the assemblies in a specified ring. Definitions of rings can change
- with problem parameters.
+ Returns the assemblies in a specified ring. Definitions of rings can change with problem
+ parameters.
Parameters
----------
@@ -1066,9 +1058,8 @@ def getAssembliesInCircularRing(
self, ring, typeSpec=None, exactType=False, exclusions=None
):
"""
- Gets an assemblies within a circular range of the center of the core. This
- function allows for more circular styled assembly shuffling instead of the
- current hex approach.
+ Gets assemblies within a circular range of the center of the core. This function allows
+ for more circular-styled assembly shuffling instead of the current hex approach.
Parameters
----------
@@ -1120,7 +1111,8 @@ def getAssembliesInCircularRing(
def buildCircularRingDictionary(self, ringPitch=1.0):
"""
- Builds a dictionary of all circular rings in the core. This is required information for getAssembliesInCircularRing.
+ Builds a dictionary of all circular rings in the core. This is required information for
+ getAssembliesInCircularRing.
The purpose of this function is to allow for more circular core shuffling in the hex design.
@@ -1160,12 +1152,12 @@ def _getAssembliesByName(self):
for assem in self.getAssemblies(includeBolAssems=True, includeSFP=True):
aName = assem.getName()
if aName in assymap and assymap[aName] != assem:
- # dangerous situation that can occur in restart runs where the global assemNum isn't updated.
- # !=assem clause added because sometimes an assem is in one of the includeAll lists that is also in the
- # core and that's ok.
+ # dangerous situation that can occur in restart runs where the global assemNum isn't
+ # updated. !=assem clause added because sometimes an assem is in one of the
+ # includeAll lists that is also in the core and that's ok.
runLog.error(
- "Two (or more) assemblies in the reactor (and associated lists) have the name {0},\n"
- "including {1} and {2}.".format(aName, assem, assymap[aName])
+ "Two (or more) assemblies in the reactor (and associated lists) have the name "
+ "{0},\nincluding {1} and {2}.".format(aName, assem, assymap[aName])
)
raise RuntimeError("Assembly name collision.")
@@ -1179,8 +1171,8 @@ def getAssemblyByName(self, name):
:id: I_ARMI_R_GET_ASSEM_NAME
:implements: R_ARMI_R_GET_ASSEM_NAME
- This method returns the :py:class:`assembly
- <armi.reactor.assemblies.Assembly>` with a name matching the
+ This method returns the :py:class:`assembly <armi.reactor.assemblies.Assembly>`
+ with a name matching the
value provided as an input parameter to this function. The ``name`` of
an assembly is based on the ``assemNum`` parameter.
@@ -1266,8 +1258,8 @@ def getAssemblies(
return assems
def getNozzleTypes(self):
- """
- Get a dictionary of all of the assembly ``nozzleType``s in the core.
+ r"""
+ Get a dictionary of all of the assembly ``nozzleType``\ s in the core.
Returns
-------
@@ -1516,7 +1508,7 @@ def summarizeNuclideCategories(self):
),
],
headers=["Nuclide Category", "Nuclides"],
- tablefmt="armi",
+ tableFmt="armi",
)
)
@@ -1634,7 +1626,7 @@ def getFluxVector(
newFlux.extend(oneGroup)
flux = newFlux
- return numpy.array(flux)
+ return np.array(flux)
def getAssembliesOfType(self, typeSpec, exactMatch=False):
"""Return a list of assemblies in the core that are of type assemType."""
@@ -1737,7 +1729,7 @@ def getAssemblyPitch(self):
def findNeighbors(
self, a, showBlanks=True, duplicateAssembliesOnReflectiveBoundary=False
):
- """
+ r"""
Find assemblies that are next to this assembly.
Return a list of neighboring assemblies.
@@ -1768,7 +1760,7 @@ def findNeighbors(
The ``duplicateAssembliesOnReflectiveBoundary`` setting only works for
1/3 core symmetry with periodic boundary conditions. For these types
- of geometries, if this setting is ``True``, neighbor lists for
+ of geometries, if this setting is ``True``\ , neighbor lists for
assemblies along a periodic boundary will include the assemblies
along the opposite periodic boundary that are effectively neighbors.
@@ -1814,7 +1806,7 @@ def findNeighbors(
This uses the 'mcnp' index map (MCNP GEODST hex coordinates) instead of
        the standard (ring, pos) map, because neighbors have consistent indices
- this way. We then convert over to (ring, pos) using the lookup table
+ this way. We then convert over to (ring, pos) using the lookup table
that a reactor has.
Returns
@@ -1894,13 +1886,12 @@ def _getReflectiveDuplicateAssembly(self, neighborLoc):
def setMoveList(self, cycle, oldLoc, newLoc, enrichList, assemblyType, assemName):
"""Tracks the movements in terms of locations and enrichments."""
data = (oldLoc, newLoc, enrichList, assemblyType, assemName)
- # NOTE: moveList is actually a moveDict (misnomer)
- if self.moveList.get(cycle) is None:
- self.moveList[cycle] = []
- if data in self.moveList[cycle]:
+ if self.moves.get(cycle) is None:
+ self.moves[cycle] = []
+ if data in self.moves[cycle]:
# remove the old version and throw the new on at the end.
- self.moveList[cycle].remove(data)
- self.moveList[cycle].append(data)
+ self.moves[cycle].remove(data)
+ self.moves[cycle].append(data)
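
The renamed ``moves`` attribute is a plain dict keyed by cycle number, each value a list of move tuples; re-recording an identical move bumps it to the end of the list instead of duplicating it. The same bookkeeping in isolation, with hypothetical values::

    moves = {}
    cycle = 0
    # (oldLoc, newLoc, enrichList, assemblyType, assemName); values are hypothetical.
    data = ("A1001", "A2003", [0.12], "igniter fuel", "A0042")

    if moves.get(cycle) is None:
        moves[cycle] = []
    if data in moves[cycle]:
        moves[cycle].remove(data)  # drop the stale copy...
    moves[cycle].append(data)      # ...and append the newest version last
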
def createFreshFeed(self, cs=None):
"""
@@ -2091,7 +2082,7 @@ def findAllMeshPoints(self, assems=None, applySubMesh=True):
for axis, (collection, subdivisions) in enumerate(
zip((iMesh, jMesh, kMesh), numPoints)
):
- axisVal = float(base[axis]) # convert from numpy.float64
+ axisVal = float(base[axis]) # convert from np.float64
step = float(top[axis] - axisVal) / subdivisions
for _subdivision in range(subdivisions):
collection.add(round(axisVal, units.FLOAT_DIMENSION_DECIMALS))
@@ -2132,7 +2123,7 @@ def updateAxialMesh(self):
refAssem = self.refAssem
refMesh = self.findAllAxialMeshPoints([refAssem])
avgHeight = average1DWithinTolerance(
- numpy.array(
+ np.array(
[
[
h
@@ -2144,7 +2135,7 @@ def updateAxialMesh(self):
]
)
)
- self.p.axialMesh = list(numpy.append([0.0], avgHeight.cumsum()))
+ self.p.axialMesh = list(np.append([0.0], avgHeight.cumsum()))
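
``updateAxialMesh`` averages block heights across the reference assembly's mesh within a tolerance, then converts those heights to absolute elevations with a cumulative sum anchored at 0.0. The cumsum step alone::

    import numpy as np

    avgHeight = np.array([25.0, 25.0, 50.0])  # assumed per-block heights in cm
    axialMesh = list(np.append([0.0], avgHeight.cumsum()))
    # axialMesh == [0.0, 25.0, 50.0, 100.0]
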
def findAxialMeshIndexOf(self, heightCm):
"""
@@ -2218,12 +2209,11 @@ def findAllRadMeshPoints(self, extraAssems=None, applySubMesh=True):
Parameters
----------
extraAssems : list
- additional assemblies to consider when determining the mesh points. They may
- be useful in the MCPNXT models to represent the fuel management dummies.
+ additional assemblies to consider when determining the mesh points. They may be useful
+ in the MCPNXT models to represent the fuel management dummies.
applySubMesh : bool
- (not implemented) generates submesh points to further discretize the radial
- reactor mesh
+ (not implemented) generates submesh points to further discretize the radial reactor mesh
"""
_, j, _ = self.findAllMeshPoints(extraAssems, applySubMesh)
return j
@@ -2252,9 +2242,8 @@ def getMaxNumPins(self):
def getMinimumPercentFluxInFuel(self, target=0.005):
"""
- Goes through the entire reactor to determine what percentage of flux occurs at
- each ring. Starting with the outer ring, this function helps determine the effective
- size of the core where additional assemblies will not help the breeding in the TWR.
+ Starting with the outer ring, this method goes through the entire Reactor to determine what
+ percentage of flux occurs at each ring.
Parameters
----------
diff --git a/armi/reactor/tests/test_assemblies.py b/armi/reactor/tests/test_assemblies.py
index 37e9df87b..978e8b3e0 100644
--- a/armi/reactor/tests/test_assemblies.py
+++ b/armi/reactor/tests/test_assemblies.py
@@ -39,7 +39,6 @@
Flags,
grids,
HexAssembly,
- numpy,
runLog,
)
from armi.reactor.tests import test_reactors
@@ -134,7 +133,7 @@ def buildTestAssemblies():
assemblieObjs = []
for numBlocks, blockTemplate in zip([1, 1, 5, 4], [block, block2, block, block]):
assembly = assemblies.HexAssembly("testAssemblyType")
- assembly.spatialGrid = grids.axialUnitGrid(numBlocks)
+ assembly.spatialGrid = grids.AxialGrid.fromNCells(numBlocks)
assembly.spatialGrid.armiObject = assembly
for _i in range(numBlocks):
newBlock = copy.deepcopy(blockTemplate)
@@ -184,7 +183,7 @@ def makeTestAssembly(
):
coreGrid = r.core.spatialGrid if r is not None else spatialGrid
a = HexAssembly("TestAssem", assemNum=assemNum)
- a.spatialGrid = grids.axialUnitGrid(numBlocks)
+ a.spatialGrid = grids.AxialGrid.fromNCells(numBlocks)
a.spatialGrid.armiObject = a
a.spatialLocator = coreGrid[2, 2, 0]
return a
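
These test updates reflect the replacement of the free function ``grids.axialUnitGrid(n)`` with the classmethod ``grids.AxialGrid.fromNCells(n)``. A minimal usage sketch mirroring the tests::

    from armi.reactor import grids

    numBlocks = 5
    grid = grids.AxialGrid.fromNCells(numBlocks)  # replaces grids.axialUnitGrid(numBlocks)
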
@@ -270,6 +269,10 @@ def setUp(self):
self.assembly.calculateZCoords()
+ def test_isOnWhichSymmetryLine(self):
+ line = self.assembly.isOnWhichSymmetryLine()
+ self.assertEqual(line, 2)
+
def test_notesParameter(self):
self.assertEqual(self.assembly.p.notes, "")
@@ -320,10 +323,15 @@ def test_extend(self):
def test_add(self):
a = makeTestAssembly(1, 1)
- b = blocks.HexBlock("TestBlock")
- a.add(b)
- self.assertIn(b, a)
- self.assertEqual(b.parent, a)
+
+ # successfully add some Blocks to an Assembly
+ for n in range(3):
+ self.assertEqual(len(a), n)
+ b = blocks.HexBlock("TestBlock")
+ a.add(b)
+ self.assertIn(b, a)
+ self.assertEqual(b.parent, a)
+ self.assertEqual(len(a), n + 1)
def test_moveTo(self):
ref = self.r.core.spatialGrid.getLocatorFromRingAndPos(3, 10)
@@ -363,10 +371,14 @@ def test_getArea(self):
:id: T_ARMI_ASSEM_DIMS0
:tests: R_ARMI_ASSEM_DIMS
"""
+ # Default case: for assemblies with no blocks
+ a = HexAssembly("TestAssem", assemNum=10)
+ self.assertEqual(a.getArea(), 1)
+
+ # more realistic case: a hex block/assembly
cur = self.assembly.getArea()
ref = math.sqrt(3) / 2.0 * self.hexDims["op"] ** 2
- places = 6
- self.assertAlmostEqual(cur, ref, places=places)
+ self.assertAlmostEqual(cur, ref, places=6)
def test_getVolume(self):
"""Tests volume calculation for hex assembly.
@@ -479,18 +491,6 @@ def test_getFissileMass(self):
ref = sum(bi.getMass(["U235", "PU239"]) for bi in self.assembly)
self.assertAlmostEqual(cur, ref)
- def test_getPuFrac(self):
- puAssem = self.assembly.getPuFrac()
- fuelBlock = self.assembly[1]
- puBlock = fuelBlock.getPuFrac()
- self.assertAlmostEqual(puAssem, puBlock)
-
- #
- fuelComp = fuelBlock.getComponent(Flags.FUEL)
- fuelComp.setNumberDensity("PU239", 0.012)
- self.assertGreater(self.assembly.getPuFrac(), puAssem)
- self.assertGreater(fuelBlock.getPuFrac(), puAssem)
-
def test_getMass(self):
mass0 = self.assembly.getMass("U235")
mass1 = sum(bi.getMass("U235") for bi in self.assembly)
@@ -505,15 +505,6 @@ def test_getMass(self):
fuelBlock.setMass("U238", 0.0)
self.assertAlmostEqual(blockU35Mass * 2, fuelBlock.getMass("U235"))
- def test_getZrFrac(self):
- self.assertAlmostEqual(self.assembly.getZrFrac(), 0.1)
-
- def test_getMaxUraniumMassEnrich(self):
- baseEnrich = self.assembly[0].getUraniumMassEnrich()
- self.assertAlmostEqual(self.assembly.getMaxUraniumMassEnrich(), baseEnrich)
- self.assembly[2].setNumberDensity("U235", 2e-1)
- self.assertGreater(self.assembly.getMaxUraniumMassEnrich(), baseEnrich)
-
def test_getAge(self):
res = 5.0
for b in self.assembly:
@@ -532,7 +523,6 @@ def test_makeAxialSnapList(self):
# add some blocks with a component
for _i in range(assemNum2):
-
self.hexDims = {
"Tinput": 273.0,
"Thot": 273.0,
@@ -614,8 +604,8 @@ def test_duplicate(self):
for refBlock, curBlock in zip(self.assembly, assembly2):
numNucs = 0
- for nuc in self.assembly.getAncestorWithFlags(
- Flags.REACTOR
+ for nuc in self.assembly.getAncestor(
+ lambda c: isinstance(c, reactors.Reactor)
).blueprints.allNuclidesInProblem:
numNucs += 1
# Block level density
@@ -641,7 +631,7 @@ def test_duplicate(self):
continue
ref = refBlock.p[refParam]
cur = curBlock.p[refParam]
- if isinstance(cur, numpy.ndarray):
+ if isinstance(cur, np.ndarray):
self.assertTrue((cur == ref).all())
else:
if refParam == "location":
@@ -668,14 +658,14 @@ def test_duplicate(self):
continue
ref = self.assembly.p[param]
cur = assembly2.p[param]
- if isinstance(cur, numpy.ndarray):
+ if isinstance(cur, np.ndarray):
assert_allclose(cur, ref)
else:
self.assertEqual(cur, ref)
- # Block level reactor and parent
+ # Block level core and parent
for b in assembly2:
- self.assertEqual(b.r, None)
+ self.assertEqual(b.core, None)
self.assertEqual(b.parent, assembly2)
def test_hasFlags(self):
@@ -728,17 +718,14 @@ def test_getBlockData(self):
for param in paramDict:
cur = list(self.assembly.getChildParamValues(param))
ref = []
- x = 0
- for b in self.blockList:
- ref.append(self.blockList[x].p[param])
- x += 1
- places = 6
- self.assertAlmostEqual(cur, ref, places=places)
+ for b in self.blockList:
+ ref.append(b.p[param])
+ self.assertAlmostEqual(cur, ref, places=6)
def test_getMaxParam(self):
-
for bi, b in enumerate(self.assembly):
b.p.power = bi
+
self.assertAlmostEqual(
self.assembly.getMaxParam("power"), len(self.assembly) - 1
)
@@ -749,7 +736,6 @@ def test_getElevationsMatchingParamValue(self):
self.assembly[2].p.power = 10.0
heights = self.assembly.getElevationsMatchingParamValue("power", 15.0)
-
self.assertListEqual(heights, [12.5, 20.0])
def test_calcAvgParam(self):
@@ -870,6 +856,11 @@ def test_getDim(self):
:id: T_ARMI_ASSEM_DIMS3
:tests: R_ARMI_ASSEM_DIMS
"""
+ # quick test, if there are no blocks
+ a = HexAssembly("TestAssem", assemNum=10)
+ self.assertIsNone(a.getDim(Flags.FUEL, "op"))
+
+ # more interesting test, with blocks
cur = self.assembly.getDim(Flags.FUEL, "op")
ref = self.hexDims["op"]
places = 6
@@ -960,35 +951,29 @@ def test_getParamValuesAtZ(self):
for b in self.assembly:
b.p.percentBu = None
self.assertTrue(
- numpy.isnan(self.assembly.getParamValuesAtZ("percentBu", 25.0))
+ np.isnan(self.assembly.getParamValuesAtZ("percentBu", 25.0))
)
# multiDimensional param
for b, flux in zip(self.assembly, [[1, 10], [2, 8], [3, 6]]):
b.p.mgFlux = flux
self.assertTrue(
- numpy.allclose(
- [2.5, 7.0], self.assembly.getParamValuesAtZ("mgFlux", 20.0)
- )
+ np.allclose([2.5, 7.0], self.assembly.getParamValuesAtZ("mgFlux", 20.0))
)
self.assertTrue(
- numpy.allclose(
- [1.5, 9.0], self.assembly.getParamValuesAtZ("mgFlux", 10.0)
- )
+ np.allclose([1.5, 9.0], self.assembly.getParamValuesAtZ("mgFlux", 10.0))
)
for b in self.assembly:
b.p.mgFlux = [0.0] * 2
self.assertTrue(
- numpy.allclose(
- [0.0, 0.0], self.assembly.getParamValuesAtZ("mgFlux", 10.0)
- )
+ np.allclose([0.0, 0.0], self.assembly.getParamValuesAtZ("mgFlux", 10.0))
)
# single value param at corner
for b, temp in zip(self.assembly, [100, 200, 300]):
b.p.THcornTemp = [temp + iCorner for iCorner in range(6)]
value = self.assembly.getParamValuesAtZ("THcornTemp", 20.0)
- self.assertTrue(numpy.allclose([200, 201, 202, 203, 204, 205], value))
+ self.assertTrue(np.allclose([200, 201, 202, 203, 204, 205], value))
finally:
percentBuDef.location = originalLoc
@@ -1105,6 +1090,9 @@ def test_rotate(self):
a.rotate(math.radians(120))
self.assertIn("No rotation method defined", mock.getStdout())
+ with self.assertRaisesRegex(ValueError, expected_regex="60 degree"):
+ a.rotate(math.radians(40))
+
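The new ValueError assertion reflects that hexagonal assemblies only admit rotations in 60-degree increments. A hedged sketch of the kind of validation being exercised (the helper below is hypothetical, not ARMI's implementation):

```python
import math


def validateHexRotation(rad):
    # hypothetical validator: hex geometry only admits multiples of 60 degrees
    degrees = math.degrees(rad)
    remainder = degrees % 60.0
    if not (math.isclose(remainder, 0.0, abs_tol=1e-9) or math.isclose(remainder, 60.0)):
        raise ValueError(f"Rotation must be a multiple of 60 degrees, got {degrees}")


validateHexRotation(math.radians(120))  # fine
try:
    validateHexRotation(math.radians(40))
except ValueError as err:
    print(err)
```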
def test_assem_block_types(self):
"""Test that all children of an assembly are blocks, ordered from top to bottom.
@@ -1163,6 +1151,12 @@ def test_snapAxialMeshToReferenceConservingMassBasedOnBlockIgniter(self):
igniterFuel = self.r.core.childrenByLocator[grid[0, 0, 0]]
# gridplate, fuel, fuel, fuel, plenum
+ for b in igniterFuel.getBlocks(Flags.FUEL):
+ fuelComp = b.getComponent(Flags.FUEL)
+ # add isotopes from clad and coolant to fuel component to test mass conservation
+ # mass should only be conserved within fuel component, not over the whole block
+ fuelComp.setNumberDensity("FE56", 1e-10)
+ fuelComp.setNumberDensity("NA23", 1e-10)
b = igniterFuel[0]
coolantNucs = b.getComponent(Flags.COOLANT).getNuclides()
coolMass = 0
@@ -1175,6 +1169,8 @@ def test_snapAxialMeshToReferenceConservingMassBasedOnBlockIgniter(self):
igniterHMMass1 = b.getHMMass()
igniterZircMass1 = b.getMass("ZR")
igniterFuelBlockMass = b.getMass()
+ igniterDuctMass = b.getComponent(Flags.DUCT).getMass()
+ igniterCoolMass = b.getComponent(Flags.COOLANT).getMass()
coolMass = 0
b = igniterFuel[4]
@@ -1199,6 +1195,8 @@ def test_snapAxialMeshToReferenceConservingMassBasedOnBlockIgniter(self):
b = igniterFuel[1]
igniterHMMass1AfterExpand = b.getHMMass()
igniterZircMass1AfterExpand = b.getMass("ZR")
+ igniterDuctMassAfterExpand = b.getComponent(Flags.DUCT).getMass()
+ igniterCoolMassAfterExpand = b.getComponent(Flags.COOLANT).getMass()
coolMass = 0
b = igniterFuel[4]
@@ -1209,6 +1207,14 @@ def test_snapAxialMeshToReferenceConservingMassBasedOnBlockIgniter(self):
self.assertAlmostEqual(igniterMassGrid, igniterMassGridAfterExpand, 7)
self.assertAlmostEqual(igniterHMMass1, igniterHMMass1AfterExpand, 7)
self.assertAlmostEqual(igniterZircMass1, igniterZircMass1AfterExpand, 7)
+ # demonstrate that the duct and coolant masses are not conserved:
+ # number density stays constant, so mass is scaled by the ratio of new to old height
+ self.assertAlmostEqual(
+ igniterDuctMass, igniterDuctMassAfterExpand * 25.0 / 26.0, 7
+ )
+ self.assertAlmostEqual(
+ igniterCoolMass, igniterCoolMassAfterExpand * 25.0 / 26.0, 7
+ )
# Note the masses are linearly different by the amount that the plenum shrunk
self.assertAlmostEqual(
igniterPlenumMass, igniterPlenumMassAfterExpand * 75 / 67.0, 7
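The 25/26 factor encodes exactly the non-conservation described in the comment: when number density is held constant, mass scales linearly with block height. A toy bookkeeping check (all numbers illustrative):

```python
# number density held constant => mass scales linearly with height (toy numbers)
ndens = 0.02        # atoms/(barn*cm), illustrative
area = 50.0         # cm^2, illustrative
gramsPerUnit = 1.0  # fold the unit conversions together for this sketch


def mass(height):
    return ndens * area * height * gramsPerUnit


oldHeight, newHeight = 25.0, 26.0
# mass at the old height equals the new mass scaled back by oldHeight/newHeight
assert abs(mass(oldHeight) - mass(newHeight) * oldHeight / newHeight) < 1e-12
```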
@@ -1233,6 +1239,8 @@ def test_snapAxialMeshToReferenceConservingMassBasedOnBlockIgniter(self):
igniterHMMass1AfterShrink = b.getHMMass()
igniterZircMass1AfterShrink = b.getMass("ZR")
igniterFuelBlockMassAfterShrink = b.getMass()
+ igniterDuctMassAfterShrink = b.getComponent(Flags.DUCT).getMass()
+ igniterCoolMassAfterShrink = b.getComponent(Flags.COOLANT).getMass()
coolMass = 0
b = igniterFuel[4]
@@ -1245,6 +1253,8 @@ def test_snapAxialMeshToReferenceConservingMassBasedOnBlockIgniter(self):
self.assertAlmostEqual(igniterHMMass1, igniterHMMass1AfterShrink, 7)
self.assertAlmostEqual(igniterZircMass1, igniterZircMass1AfterShrink, 7)
self.assertAlmostEqual(igniterFuelBlockMass, igniterFuelBlockMassAfterShrink, 7)
+ self.assertAlmostEqual(igniterDuctMass, igniterDuctMassAfterShrink, 7)
+ self.assertAlmostEqual(igniterCoolMass, igniterCoolMassAfterShrink, 7)
self.assertAlmostEqual(igniterPlenumMass, igniterPlenumMassAfterShrink, 7)
def test_snapAxialMeshToReferenceConservingMassBasedOnBlockShield(self):
diff --git a/armi/reactor/tests/test_blocks.py b/armi/reactor/tests/test_blocks.py
index ff61a120f..08f729e9a 100644
--- a/armi/reactor/tests/test_blocks.py
+++ b/armi/reactor/tests/test_blocks.py
@@ -13,25 +13,26 @@
# limitations under the License.
"""Tests blocks.py."""
import copy
+import io
import math
import os
import unittest
-import io
+from unittest.mock import MagicMock, patch
-import numpy
+import numpy as np
from numpy.testing import assert_allclose
from armi import materials, runLog, settings, tests
-from armi.reactor import blueprints
-from armi.reactor.components import basicShapes, complexShapes
from armi.nucDirectory import nucDir, nuclideBases
+from armi.nuclearDataIO import xsCollections
from armi.nuclearDataIO.cccc import isotxs
-from armi.physics.neutronics import NEUTRON, GAMMA
+from armi.physics.neutronics import GAMMA, NEUTRON
from armi.physics.neutronics.settings import (
CONF_LOADING_FILE,
CONF_XS_KERNEL,
)
-from armi.reactor import blocks, components, geometry, grids
+from armi.reactor import blocks, blueprints, components, geometry, grids
+from armi.reactor.components import basicShapes, complexShapes
from armi.reactor.flags import Flags
from armi.reactor.tests.test_assemblies import makeTestAssembly
from armi.tests import ISOAA_PATH, TEST_ROOT
@@ -69,8 +70,6 @@ def buildSimpleFuelBlock():
b.add(coolant)
b.add(intercoolant)
- b.getVolumeFractions() # TODO: remove, should be no-op when removed self.cached
-
return b
@@ -231,8 +230,6 @@ def loadTestBlock(cold=True):
block.add(duct)
block.add(interSodium)
- block.getVolumeFractions() # TODO: remove, should be no-op when removed self.cached
-
block.setHeight(16.0)
block.autoCreateSpatialGrids()
@@ -288,7 +285,7 @@ def applyDummyData(block):
xslib._nuclides["WAA"] = xslib._nuclides["W184AA"]
xslib._nuclides["MNAA"] = xslib._nuclides["MN55AA"]
block.p.mgFlux = flux
- block.r.core.lib = xslib
+ block.core.lib = xslib
def getComponentData(component):
@@ -322,18 +319,17 @@ def test_updateDetailedNdens(self):
block = self.r.core[0][0]
# get nuclides in first component in block
adjList = block[0].getNuclides()
- block.p.detailedNDens = numpy.array([1.0])
+ block.p.detailedNDens = np.array([1.0])
block.p.pdensDecay = 1.0
block._updateDetailedNdens(frac=0.5, adjustList=adjList)
self.assertEqual(block.p.pdensDecay, 0.5)
- self.assertEqual(block.p.detailedNDens, numpy.array([0.5]))
+ self.assertEqual(block.p.detailedNDens, np.array([0.5]))
class Block_TestCase(unittest.TestCase):
def setUp(self):
self.block = loadTestBlock()
self._hotBlock = loadTestBlock(cold=False)
- self.r = self.block.r
def test_getSmearDensity(self):
cur = self.block.getSmearDensity()
@@ -803,7 +799,7 @@ def test_getWettedPerimeter(self):
cur = self.block.getWettedPerimeter()
wire = self.block.getComponent(Flags.WIRE)
- correctionFactor = numpy.hypot(
+ correctionFactor = np.hypot(
1.0,
math.pi
* wire.getDimension("helixDiameter")
@@ -881,7 +877,7 @@ def test_setLocation(self):
3,
),
):
- self.r.core.symmetry = geometry.SymmetryType.fromAny(symmetry)
+ self.block.core.symmetry = geometry.SymmetryType.fromAny(symmetry)
i, j = grids.HexGrid.getIndicesFromRingAndPos(1, 1)
b.spatialLocator = b.core.spatialGrid[i, j, 0]
self.assertEqual(0, b.spatialLocator.k)
@@ -1009,8 +1005,12 @@ def test_getUraniumNumEnrich(self):
u5 = self.block.getNumberDensity("U235")
ref = u5 / (u8 + u5)
- places = 6
- self.assertAlmostEqual(cur, ref, places=places)
+ self.assertAlmostEqual(cur, ref, places=6)
+
+ # test the zero edge case
+ self.block.adjustUEnrich(0)
+ cur = self.block.getUraniumNumEnrich()
+ self.assertEqual(cur, 0.0)
def test_getNumberOfAtoms(self):
self.block.clearNumberDensities()
@@ -1052,16 +1052,6 @@ def test_getPu(self):
}
fuel.setNumberDensities({nuc: v / vFrac for nuc, v in refDict.items()})
- # test number density
- cur = self.block.getPuN()
- ndens = 0.0
- for nucName in refDict.keys():
- if nucName in ["PU238", "PU239", "PU240", "PU241", "PU242"]:
- ndens += self.block.getNumberDensity(nucName)
- ref = ndens
- places = 6
- self.assertAlmostEqual(cur, ref, places=places)
-
# test moles
cur = self.block.getPuMoles()
ndens = 0.0
@@ -1074,16 +1064,7 @@ def test_getPu(self):
* self.block.getVolume()
* self.block.getSymmetryFactor()
)
- places = 6
- self.assertAlmostEqual(cur, ref, places=places)
-
- # test mass
- cur = self.block.getPuMass()
- pu = 0.0
- for nucName in refDict.keys():
- if nucName in ["PU238", "PU239", "PU240", "PU241", "PU242"]:
- pu += self.block.getMass(nucName)
- self.assertAlmostEqual(cur, pu)
+ self.assertAlmostEqual(cur, ref, places=6)
def test_adjustDensity(self):
u235Dens = 0.003
@@ -1098,12 +1079,11 @@ def test_adjustDensity(self):
cur = self.block.getNumberDensity("U235")
ref = densAdj * u235Dens
- places = 6
- self.assertAlmostEqual(cur, ref, places=places)
+ self.assertAlmostEqual(cur, ref, places=9)
cur = self.block.getNumberDensity("U238")
ref = densAdj * u238Dens
- self.assertAlmostEqual(cur, ref, places=places)
+ self.assertAlmostEqual(cur, ref, places=9)
self.assertAlmostEqual(mass2 - mass1, massDiff)
@@ -1236,6 +1216,10 @@ def test_getComponentsOfMaterial(self):
],
)
+ # test edge case
+ cur = self.block.getComponentsOfMaterial(None, "UZr")
+ self.assertEqual(cur[0], ref)
+
def test_getComponentByName(self):
"""Test children by name.
@@ -1364,7 +1348,7 @@ def test_setLinPowByPin(self):
# Test with no powerKeySuffix
self.block.setPinPowers(neutronPower)
- assert_allclose(self.block.p[totalPowerKey], numpy.array(neutronPower))
+ assert_allclose(self.block.p[totalPowerKey], np.array(neutronPower))
self.assertIsNone(self.block.p[neutronPowerKey])
self.assertIsNone(self.block.p[gammaPowerKey])
@@ -1373,8 +1357,8 @@ def test_setLinPowByPin(self):
neutronPower,
powerKeySuffix=NEUTRON,
)
- assert_allclose(self.block.p[totalPowerKey], numpy.array(neutronPower))
- assert_allclose(self.block.p[neutronPowerKey], numpy.array(neutronPower))
+ assert_allclose(self.block.p[totalPowerKey], np.array(neutronPower))
+ assert_allclose(self.block.p[neutronPowerKey], np.array(neutronPower))
self.assertIsNone(self.block.p[gammaPowerKey])
# Test with gamma powers
@@ -1382,9 +1366,9 @@ def test_setLinPowByPin(self):
gammaPower,
powerKeySuffix=GAMMA,
)
- assert_allclose(self.block.p[totalPowerKey], numpy.array(totalPower))
- assert_allclose(self.block.p[neutronPowerKey], numpy.array(neutronPower))
- assert_allclose(self.block.p[gammaPowerKey], numpy.array(gammaPower))
+ assert_allclose(self.block.p[totalPowerKey], np.array(totalPower))
+ assert_allclose(self.block.p[neutronPowerKey], np.array(neutronPower))
+ assert_allclose(self.block.p[gammaPowerKey], np.array(gammaPower))
def test_getComponentAreaFrac(self):
def calcFracManually(names):
@@ -1466,33 +1450,33 @@ def test_106_getAreaFractions(self):
def test_rotatePins(self):
b = self.block
b.setRotationNum(0)
- index = b.rotatePins(0, justCompute=True)
+ index = b._rotatePins(0, justCompute=True)
self.assertEqual(b.getRotationNum(), 0)
self.assertEqual(index[5], 5)
self.assertEqual(index[2], 2) # pin 1 is center and never rotates.
- index = b.rotatePins(1)
+ index = b._rotatePins(1)
self.assertEqual(b.getRotationNum(), 1)
self.assertEqual(index[2], 3)
self.assertEqual(b.p.pinLocation[1], 3)
- index = b.rotatePins(1)
+ index = b._rotatePins(1)
self.assertEqual(b.getRotationNum(), 2)
self.assertEqual(index[2], 4)
self.assertEqual(b.p.pinLocation[1], 4)
- index = b.rotatePins(2)
- index = b.rotatePins(4) # over-rotate to check modulus
+ index = b._rotatePins(2)
+ index = b._rotatePins(4) # over-rotate to check modulus
self.assertEqual(b.getRotationNum(), 2)
self.assertEqual(index[2], 4)
self.assertEqual(index[6], 2)
self.assertEqual(b.p.pinLocation[1], 4)
self.assertEqual(b.p.pinLocation[5], 2)
- self.assertRaises(ValueError, b.rotatePins, -1)
- self.assertRaises(ValueError, b.rotatePins, 10)
- self.assertRaises((ValueError, TypeError), b.rotatePins, None)
- self.assertRaises((ValueError, TypeError), b.rotatePins, "a")
+ self.assertRaises(ValueError, b._rotatePins, -1)
+ self.assertRaises(ValueError, b._rotatePins, 10)
+ self.assertRaises((ValueError, TypeError), b._rotatePins, None)
+ self.assertRaises((ValueError, TypeError), b._rotatePins, "a")
def test_expandElementalToIsotopics(self):
r"""Tests the expand to elementals capability."""
@@ -1617,8 +1601,9 @@ def test_consistentMassDensityVolumeBetweenColdBlockAndColdComponents(self):
)
for expected, actual in zip(expectedData, actualData):
- msg = "Data (component, density, volume, mass) for component {} does not match. Expected: {}, Actual: {}".format(
- expected[0], expected, actual
+ msg = (
+ "Data (component, density, volume, mass) for component {} does not match. "
+ "Expected: {}, Actual: {}".format(expected[0], expected, actual)
)
for expectedVal, actualVal in zip(expected, actual):
self.assertAlmostEqual(expectedVal, actualVal, msg=msg)
@@ -1634,8 +1619,9 @@ def test_consistentMassDensityVolumeBetweenHotBlockAndHotComponents(self):
)
for expected, actual in zip(expectedData, actualData):
- msg = "Data (component, density, volume, mass) for component {} does not match. Expected: {}, Actual: {}".format(
- expected[0], expected, actual
+ msg = (
+ "Data (component, density, volume, mass) for component {} does not match. "
+ "Expected: {}, Actual: {}".format(expected[0], expected, actual)
)
for expectedVal, actualVal in zip(expected, actual):
self.assertAlmostEqual(expectedVal, actualVal, msg=msg)
@@ -1646,11 +1632,11 @@ def test_consistentAreaWithOverlappingComponents(self):
Notes
-----
- This test calculates a reference coolant area by subtracting the areas of the intercoolant, duct, wire wrap,
- and pins from the total hex block area.
- The area of the pins is calculated using only the outer radius of the clad.
- This avoids the use of negative areas as implemented in Block.getVolumeFractions.
- Na-23 mass will not be conserved as when duct/clad expands sodium is evacuated
+ This test calculates a reference coolant area by subtracting the areas of the intercoolant,
+ duct, wire wrap, and pins from the total hex block area. The area of the pins is calculated
+ using only the outer radius of the clad. This avoids the use of negative areas as
+ implemented in Block.getVolumeFractions. Na-23 mass will not be conserved, since sodium is
+ evacuated when the duct/clad expands.
See Also
--------
@@ -1686,8 +1672,8 @@ def test_consistentAreaWithOverlappingComponents(self):
self.assertAlmostEqual(totalHexArea, self.block.getArea())
self.assertAlmostEqual(ref, self.block.getComponent(Flags.COOLANT).getArea())
- self.assertTrue(numpy.allclose(numFE56, self.block.getNumberOfAtoms("FE56")))
- self.assertTrue(numpy.allclose(numU235, self.block.getNumberOfAtoms("U235")))
+ self.assertTrue(np.allclose(numFE56, self.block.getNumberOfAtoms("FE56")))
+ self.assertTrue(np.allclose(numU235, self.block.getNumberOfAtoms("U235")))
def _testDimensionsAreLinked(self):
prevC = None
@@ -1714,7 +1700,7 @@ def test_pinMgFluxes(self):
.. warning:: This will likely be pushed to the component level.
"""
- fluxes = numpy.ones((33, 10))
+ fluxes = np.ones((33, 10))
self.block.setPinMgFluxes(fluxes)
self.block.setPinMgFluxes(fluxes * 2, adjoint=True)
self.block.setPinMgFluxes(fluxes * 3, gamma=True)
@@ -1780,9 +1766,79 @@ def test_getReactionRates(self):
)
+class BlockEnergyDepositionConstants(unittest.TestCase):
+ """Tests the energy deposition methods.
+
+ MagicMocks xsCollections.compute*Constants() -- we're not testing those methods specifically
+ so just make sure they're hit
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ cls.block = loadTestBlock()
+
+ def setUp(self):
+ self.block.core.lib = MagicMock()
+
+ @patch.object(xsCollections, "computeFissionEnergyGenerationConstants")
+ @patch.object(xsCollections, "computeCaptureEnergyGenerationConstants")
+ def test_getTotalEnergyGenerationConstants(self, mock_capture, mock_fission):
+ """Mock both xsCollections methods so you get complete coverage."""
+ _x = self.block.getTotalEnergyGenerationConstants()
+ self.assertEqual(mock_fission.call_count, 1)
+ self.assertEqual(mock_capture.call_count, 1)
+
+ @patch.object(xsCollections, "computeFissionEnergyGenerationConstants")
+ def test_getFissionEnergyDepositionConstants(self, mock_method):
+ """Test RuntimeError and that it gets to the deposition constant call."""
+ # make sure xsCollections.compute* gets hit
+ _x = self.block.getFissionEnergyGenerationConstants()
+ self.assertEqual(mock_method.call_count, 1)
+ # set core.lib to None and get RuntimeError
+ self.block.core.lib = None
+ with self.assertRaises(RuntimeError):
+ # fails because this test reactor does not have a cross-section library
+ _x = self.block.getFissionEnergyGenerationConstants()
+
+ @patch.object(xsCollections, "computeCaptureEnergyGenerationConstants")
+ def test_getCaptureEnergyGenerationConstants(self, mock_method):
+ """Test RuntimeError and that it gets to the deposition constant call."""
+ # make sure xsCollections.compute* gets hit
+ _x = self.block.getCaptureEnergyGenerationConstants()
+ self.assertEqual(mock_method.call_count, 1)
+ # set core.lib to None and get RuntimeError
+ self.block.core.lib = None
+ with self.assertRaises(RuntimeError):
+ # fails because this test reactor does not have a cross-section library
+ _x = self.block.getCaptureEnergyGenerationConstants()
+
+ @patch.object(xsCollections, "computeNeutronEnergyDepositionConstants")
+ def test_getNeutronEnergyDepositionConstants(self, mock_method):
+ """Test RuntimeError and that it gets to the deposition constant call."""
+ # make sure xsCollections.compute* gets hit
+ _x = self.block.getNeutronEnergyDepositionConstants()
+ self.assertEqual(mock_method.call_count, 1)
+ # set core.lib to None and get RuntimeError
+ self.block.core.lib = None
+ with self.assertRaises(RuntimeError):
+ _x = self.block.getNeutronEnergyDepositionConstants()
+
+ @patch.object(xsCollections, "computeGammaEnergyDepositionConstants")
+ def test_getGammaEnergyDepositionConstants(self, mock_method):
+ """Test RuntimeError and that it gets to the deposition constant call."""
+ # make sure xsCollections.compute* gets hit
+ _x = self.block.getGammaEnergyDepositionConstants()
+ self.assertEqual(mock_method.call_count, 1)
+ # set core.lib to None and get RuntimeError
+ self.block.core.lib = None
+ with self.assertRaises(RuntimeError):
+ # fails because this test reactor does not have a cross-section library
+ _x = self.block.getGammaEnergyDepositionConstants()
+
+
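These tests rely on ``unittest.mock.patch.object`` to swap out the ``xsCollections`` functions and merely count calls, rather than validate their numerics. The pattern in isolation, with a toy namespace standing in for the module:

```python
import types
from unittest.mock import patch

# toy stand-in for a module whose function we only want to see called
xs = types.SimpleNamespace(computeConstants=lambda lib: sum(lib))


def useConstants():
    return xs.computeConstants([1, 2, 3])


with patch.object(xs, "computeConstants") as mockFn:
    useConstants()
    assert mockFn.call_count == 1  # the real computation never ran
```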
class TestNegativeVolume(unittest.TestCase):
def test_negativeVolume(self):
- """Build a block with WAY too many fuel pins and show that the derived volume is negative."""
+ """Build a Block with WAY too many fuel pins & show that the derived volume is negative."""
block = blocks.HexBlock("TestHexBlock")
coldTemp = 20
@@ -1891,12 +1947,12 @@ def test_coords(self):
:id: T_ARMI_BLOCK_POSI1
:tests: R_ARMI_BLOCK_POSI
"""
- r = self.HexBlock.r
+ core = self.HexBlock.core
a = self.HexBlock.parent
- loc1 = r.core.spatialGrid[0, 1, 0]
+ loc1 = core.spatialGrid[0, 1, 0]
a.spatialLocator = loc1
x0, y0 = self.HexBlock.coords()
- a.spatialLocator = r.core.spatialGrid[0, -1, 0] # symmetric
+ a.spatialLocator = core.spatialGrid[0, -1, 0] # symmetric
x2, y2 = self.HexBlock.coords()
a.spatialLocator = loc1
self.HexBlock.p.displacementX = 0.01
@@ -1916,8 +1972,7 @@ def test_getNumPins(self):
def test_block_dims(self):
"""
- Tests that the block class can provide basic dimensionality information about
- itself.
+ Tests that the block class can provide basic dimensionality information about itself.
.. test:: Important block dimensions are retrievable.
:id: T_ARMI_BLOCK_DIMS
@@ -1938,7 +1993,7 @@ def test_block_dims(self):
def test_symmetryFactor(self):
# full hex
- self.HexBlock.spatialLocator = self.HexBlock.r.core.spatialGrid[2, 0, 0]
+ self.HexBlock.spatialLocator = self.HexBlock.core.spatialGrid[2, 0, 0]
self.HexBlock.clearCache()
self.assertEqual(1.0, self.HexBlock.getSymmetryFactor())
a0 = self.HexBlock.getArea()
@@ -1946,16 +2001,13 @@ def test_symmetryFactor(self):
m0 = self.HexBlock.getMass()
# 1/3 symmetric
- self.HexBlock.spatialLocator = self.HexBlock.r.core.spatialGrid[0, 0, 0]
+ self.HexBlock.spatialLocator = self.HexBlock.core.spatialGrid[0, 0, 0]
self.HexBlock.clearCache()
self.assertEqual(3.0, self.HexBlock.getSymmetryFactor())
self.assertEqual(a0 / 3.0, self.HexBlock.getArea())
self.assertEqual(v0 / 3.0, self.HexBlock.getVolume())
self.assertAlmostEqual(m0 / 3.0, self.HexBlock.getMass())
- symmetryLine = self.HexBlock.isOnWhichSymmetryLine()
- self.assertEqual(grids.BOUNDARY_CENTER, symmetryLine)
-
def test_retainState(self):
"""Ensure retainState restores params and spatialGrids."""
self.HexBlock.spatialGrid = grids.HexGrid.fromPitch(1.0)
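The factor-of-3 assertions encode the symmetry bookkeeping: a block sitting on the central position of a 1/3-symmetric hex core represents only a third of a full hexagon, so extensive quantities (area, volume, mass) are divided by the symmetry factor. A minimal numeric sketch (the helper is hypothetical and only handles the central position):

```python
def getSymmetryFactor(ring, symmetry="third"):
    # hypothetical helper: only the central position of a 1/3-symmetric
    # hex core is cut by the symmetry lines in this simplified sketch
    return 3.0 if symmetry == "third" and ring == 1 else 1.0


fullArea = 242.97  # cm^2, illustrative full-hex area
assert getSymmetryFactor(ring=2) == 1.0
assert abs(fullArea / getSymmetryFactor(ring=1) - fullArea / 3.0) < 1e-12
```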
@@ -2003,8 +2055,8 @@ def test_getPitchHomogeneousBlock(self):
Notes
-----
- This assumes there are 3 materials in the homogeneous block, one with half
- the area fraction, and 2 with 1/4 each.
+ This assumes there are 3 materials in the homogeneous block, one with half the area
+ fraction, and 2 with 1/4 each.
"""
desiredPitch = 14.0
hexTotalArea = hexagon.area(desiredPitch)
@@ -2013,18 +2065,17 @@ def test_getPitchHomogeneousBlock(self):
areaFractions = [0.5, 0.25, 0.25]
materials = ["HT9", "UZr", "Sodium"]
- # There are 2 ways to do this, the first is to pick a component to be the pitch
- # defining component, and given it the shape of a hexagon to define the pitch
- # The hexagon outer pitch (op) is defined by the pitch of the block/assembly.
- # the ip is defined by whatever thickness is necessary to have the desired area
- # fraction. The second way is shown in the second half of this test.
+ # There are 2 ways to do this: the first is to pick a component to be the pitch-defining
+ # component and give it the shape of a hexagon to define the pitch. The hexagon outer
+ # pitch (op) is defined by the pitch of the block/assembly. The ip is defined by whatever
+ # thickness is necessary to have the desired area fraction. The second way is shown in the
+ # second half of this test.
hexBlock = blocks.HexBlock("TestHexBlock")
hexComponentArea = areaFractions[0] * hexTotalArea
- # Picking 1st material to use for the hex component here, but really the choice
- # is arbitrary.
- # area grows quadratically with op
+ # Picking the 1st material to use for the hex component here, but really the choice is
+ # arbitrary. Area grows quadratically with op.
ipNeededForCorrectArea = desiredPitch * areaFractions[0] ** 0.5
self.assertEqual(
hexComponentArea, hexTotalArea - hexagon.area(ipNeededForCorrectArea)
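The inner-pitch formula works because hex area scales with the square of the pitch: an inner hexagon of pitch op·sqrt(x) removes fraction x of the total area, leaving fraction 1 − x for the annular component (with areaFractions[0] = 0.5 the two fractions coincide). A standalone check:

```python
import math


def hexAreaFromPitch(pitch):
    # regular hexagon area from its flat-to-flat pitch
    return math.sqrt(3) / 2.0 * pitch**2


op = 14.0
total = hexAreaFromPitch(op)
x = 0.5  # fraction of area carved out by the inner hexagon
ip = op * math.sqrt(x)
annulus = total - hexAreaFromPitch(ip)
assert math.isclose(annulus, (1.0 - x) * total)
```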
@@ -2049,10 +2100,10 @@ def test_getPitchHomogeneousBlock(self):
self.assertAlmostEqual(hexTotalArea, hexBlock.getMaxArea())
self.assertAlmostEqual(sum(c.getArea() for c in hexBlock), hexTotalArea)
- # For this second way, we will simply define the 3 components as unshaped, with
- # the desired area fractions, and make a 4th component that is an infinitely
- # thin hexagon with the the desired pitch. The downside of this method is that
- # now the block has a fourth component with no volume.
+ # For this second way, we will simply define the 3 components as unshaped, with the desired
+ # area fractions, and make a 4th component that is an infinitely thin hexagon with the
+ # desired pitch. The downside of this method is that now the block has a fourth component
+ # with no volume.
hexBlock = blocks.HexBlock("TestHexBlock")
for aFrac, material in zip(areaFractions, materials):
unshapedArgs = {"area": hexTotalArea * aFrac}
@@ -2085,8 +2136,7 @@ def test_getPinCenterFlatToFlat(self):
self.assertAlmostEqual(pinCenterFlatToFlat, f2f)
def test_gridCreation(self):
- """Create a grid for a block, and show that it can handle components with
- multiplicity > 1.
+ """Create a grid for a block, and show that it can handle components with multiplicity > 1.
.. test:: Grids can handle components with multiplicity > 1.
:id: T_ARMI_GRID_MULT
@@ -2264,9 +2314,8 @@ def test_axial(self):
def test_verifyBlockDims(self):
"""
- This function is currently null. It consists of a single line that
- returns nothing. This test covers that line. If the function is ever
- implemented, it can be tested here.
+ This function is currently null. It consists of a single line that returns nothing. This
+ test covers that line. If the function is ever implemented, it can be tested here.
"""
self.ThRZBlock.verifyBlockDims()
@@ -2319,8 +2368,8 @@ def test_getPitchHomogeneousBlock(self):
Notes
-----
- This assumes there are 3 materials in the homogeneous block, one with half
- the area fraction, and 2 with 1/4 each.
+ This assumes there are 3 materials in the homogeneous block, one with half the area
+ fraction, and 2 with 1/4 each.
"""
desiredPitch = (10.0, 12.0)
rectTotalArea = desiredPitch[0] * desiredPitch[1]
@@ -2329,24 +2378,21 @@ def test_getPitchHomogeneousBlock(self):
areaFractions = [0.5, 0.25, 0.25]
materials = ["HT9", "UZr", "Sodium"]
- # There are 2 ways to do this, the first is to pick a component to be the pitch
- # defining component, and given it the shape of a rectangle to define the pitch
- # The rectangle outer dimensions is defined by the pitch of the block/assembly.
- # the inner dimensions is defined by whatever thickness is necessary to have
- # the desired area fraction.
- # The second way is to define all physical material components as unshaped, and
- # add an additional infinitely thin Void component (no area) that defines pitch.
- # See second part of HexBlock_TestCase.test_getPitchHomogeneousBlock for
- # demonstration.
+ # There are 2 ways to do this: the first is to pick a component to be the pitch-defining
+ # component and give it the shape of a rectangle to define the pitch. The rectangle outer
+ # dimensions are defined by the pitch of the block/assembly. The inner dimensions are defined
+ # by whatever thickness is necessary to have the desired area fraction. The second way is to
+ # define all physical material components as unshaped, and add an additional infinitely thin
+ # Void component (no area) that defines pitch. See second part of
+ # HexBlock_TestCase.test_getPitchHomogeneousBlock for demonstration.
cartBlock = blocks.CartesianBlock("TestCartBlock")
hexComponentArea = areaFractions[0] * rectTotalArea
- # Picking 1st material to use for the hex component here, but really the choice
- # is arbitrary.
+ # Picking 1st material to use for the hex component here, but really the choice is
+ # arbitrary.
# area grows quadratically with outer dimensions.
- # Note there are infinitely many inner dims that would preserve area,
- # this is just one of them.
+ # Note there are infinitely many inner dims that would preserve area, this is just one.
innerDims = [dim * areaFractions[0] ** 0.5 for dim in desiredPitch]
self.assertAlmostEqual(
hexComponentArea, rectTotalArea - innerDims[0] * innerDims[1]
@@ -2393,28 +2439,24 @@ def test_getHydraulicDiameter(self):
class MassConservationTests(unittest.TestCase):
- r"""Tests designed to verify mass conservation during thermal expansion."""
+ """Tests designed to verify mass conservation during thermal expansion."""
def setUp(self):
self.b = buildSimpleFuelBlock()
def test_heightExpansionDifferences(self):
- r"""The point of this test is to determine if the number densities stay the same
- with two different heights of the same block. Since we want to expand a block
- from cold temperatures to hot using the fuel expansion coefficient (most important neutronicall),
- other components are not grown correctly. This means that on the block level, axial expansion will
- NOT conserve mass of non-fuel components. However, the excess mass is simply added to the top of
+ """The point of this test is to determine if the number densities stay the same with two
+ different heights of the same block. Since we want to expand a block from cold temperatures
+ to hot using the fuel expansion coefficient (most important neutronically), other components
+ are not grown correctly. This means that on the block level, axial expansion will NOT
+ conserve mass of non-fuel components. However, the excess mass is simply added to the top of
the reactor in the plenum regions (or any non fueled region).
"""
- # assume the default block height is 'cold' height. Now we must determine
- # what the hot height should be based on thermal expansion. Change the height
- # of the block based on the different thermal expansions of the components then
- # see the effect on the number densities.
-
+ # Assume the default block height is 'cold' height. Now we must determine what the hot
+ # height should be based on thermal expansion. Change the height of the block based on the
+ # different thermal expansions of the components then see the effect on number densities.
fuel = self.b.getComponent(Flags.FUEL)
-
height = self.b.getHeight()
-
Thot = fuel.temperatureInC
Tcold = fuel.inputTemperatureInC
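The "hot height" the comment refers to follows from linear thermal expansion of the fuel. Under the usual linear-expansion assumption (the coefficient and temperatures below are illustrative, not ARMI material data):

```python
# linear thermal expansion: L_hot = L_cold * (1 + dLL), with dLL = alpha * (Thot - Tcold)
alpha = 1.8e-5     # 1/degC, illustrative expansion coefficient
Tcold, Thot = 25.0, 450.0
coldHeight = 16.0  # cm

dLL = alpha * (Thot - Tcold)
hotHeight = coldHeight * (1.0 + dLL)
print(f"{hotHeight:.4f} cm")  # ~16.1224 cm
```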
@@ -2444,16 +2486,18 @@ def test_heightExpansionDifferences(self):
hotFuelU238,
hotCladU238,
10,
- "Number Density of fuel in one height ({0}) != number density of fuel at another height {1}. Number density conservation "
- "violated during thermal expansion".format(hotFuelU238, hotCladU238),
+ "Number Density of fuel in one height ({0}) != number density of fuel at another "
+ "height {1}. Number density conservation violated during thermal "
+ "expansion".format(hotFuelU238, hotCladU238),
)
self.assertAlmostEqual(
hotFuelIRON,
hotCladIRON,
10,
- "Number Density of clad in one height ({0}) != number density of clad at another height {1}. Number density conservation "
- "violated during thermal expansion".format(hotFuelIRON, hotCladIRON),
+ "Number Density of clad in one height ({0}) != number density of clad at another "
+ "height {1}. Number density conservation violated during thermal "
+ "expansion".format(hotFuelIRON, hotCladIRON),
)
def test_massFuelHeatup(self):
@@ -2466,8 +2510,8 @@ def test_massFuelHeatup(self):
massCold,
massHot,
10,
- "Cold mass of fuel ({0}) != hot mass {1}. Mass conservation "
- "violated during thermal expansion".format(massCold, massHot),
+ "Cold mass of fuel ({0}) != hot mass {1}. Mass conservation violated during thermal "
+ "expansion".format(massCold, massHot),
)
def test_massCladHeatup(self):
@@ -2480,8 +2524,8 @@ def test_massCladHeatup(self):
massCold,
massHot,
10,
- "Cold mass of clad ({0}) != hot mass {1}. Mass conservation "
- "violated during thermal expansion".format(massCold, massHot),
+ "Cold mass of clad ({0}) != hot mass {1}. Mass conservation violated during thermal "
+ "expansion".format(massCold, massHot),
)
def test_massDuctHeatup(self):
@@ -2508,10 +2552,8 @@ def test_massCoolHeatup(self):
self.assertGreater(
massCold,
massHot,
- "Cold mass of coolant ({0}) <= hot mass {1}. Mass conservation "
- "not violated during thermal expansion of coolant".format(
- massCold, massHot
- ),
+ "Cold mass of coolant ({0}) <= hot mass {1}. Mass conservation not violated during "
+ "thermal expansion of coolant".format(massCold, massHot),
)
def test_dimensionDuctHeatup(self):
@@ -2525,8 +2567,8 @@ def test_dimensionDuctHeatup(self):
correctHot,
pitchHot,
10,
- "Theoretical pitch of duct ({0}) != hot pitch {1}. Linear expansion "
- "violated during heatup. \nTc={tc} Tref={tref} dLL={dLL} cold={pcold}".format(
+ "Theoretical pitch of duct ({0}) != hot pitch {1}. Linear expansion violated during "
+ "heatup. \nTc={tc} Tref={tref} dLL={dLL} cold={pcold}".format(
correctHot,
pitchHot,
tc=duct.temperatureInC,
@@ -2540,8 +2582,8 @@ def test_coldMass(self):
"""
Verify that the cold mass is what it should be, even though the hot height is input.
- At the cold temperature (but with hot height), the mass should be the same as at hot temperature
- and hot height.
+ At the cold temperature (but with hot height), the mass should be the same as at hot
+ temperature and hot height.
"""
fuel = self.b.getComponent(Flags.FUEL)
# set ref (input/cold) temperature.
@@ -2570,7 +2612,7 @@ def test_coldMass(self):
)
def test_massConsistency(self):
- r"""Verify that the sum of the component masses equals the total mass."""
+ """Verify that the sum of the component masses equals the total mass."""
tMass = 0.0
for child in self.b:
tMass += child.getMass()
@@ -2581,3 +2623,23 @@ def test_massConsistency(self):
10,
"Sum of component mass {0} != total block mass {1}. ".format(tMass, bMass),
)
+
+
+class EmptyBlockRotateTest(unittest.TestCase):
+ """Rotation tests on an empty hexagonal block.
+
+ Useful for enforcing rotation works on blocks without pins.
+
+ """
+
+ def setUp(self):
+ self.block = blocks.HexBlock("empty")
+
+ def test_orientation(self):
+ """Test the orientation parameter is updated on a rotated empty block."""
+ rotDegrees = 60
+ preRotateOrientation = self.block.p.orientation[2]
+ self.block.rotate(math.radians(rotDegrees))
+ postRotationOrientation = self.block.p.orientation[2]
+ self.assertNotEqual(preRotateOrientation, postRotationOrientation)
+ self.assertEqual(postRotationOrientation, rotDegrees)
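A minimal sketch of the orientation bookkeeping this test pins down: rotate() accepts radians, while the stored orientation parameter tracks degrees about the z-axis (toy class, not ARMI's Block):

```python
import math


class ToyBlock:
    """Toy stand-in: orientation is (x, y, z) rotations in degrees."""

    def __init__(self):
        self.orientation = [0.0, 0.0, 0.0]

    def rotate(self, rad):
        # rotate() accepts radians; the stored parameter is in degrees
        self.orientation[2] += math.degrees(rad)


b = ToyBlock()
b.rotate(math.radians(60))
assert math.isclose(b.orientation[2], 60.0)
```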
diff --git a/armi/reactor/tests/test_components.py b/armi/reactor/tests/test_components.py
index fdd031f43..46b936091 100644
--- a/armi/reactor/tests/test_components.py
+++ b/armi/reactor/tests/test_components.py
@@ -17,6 +17,7 @@
import math
import unittest
+from armi.materials import air, alloy200
from armi.materials.material import Material
from armi.reactor import components
from armi.reactor import flags
@@ -44,7 +45,7 @@
ComponentType,
)
from armi.reactor.components import materials
-from armi.materials import air, alloy200
+from armi.reactor.tests.test_reactors import loadTestReactor
class TestComponentFactory(unittest.TestCase):
@@ -482,6 +483,31 @@ def test_computeVolume(self):
self.assertAlmostEqual(c.computeVolume(), 1386.5232044586771)
+class TestDerivedShapeGetArea(unittest.TestCase):
+ def test_getAreaColdTrue(self):
+ """Prove that the DerivedShape.getArea() works at cold=True."""
+ # load one-block test reactor
+ _o, r = loadTestReactor(
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
+ b = r.core[0][0]
+
+ # ensure there is a DerivedShape in this Block
+ shapes = set([type(c) for c in b])
+ self.assertIn(Circle, shapes)
+ self.assertIn(DerivedShape, shapes)
+ self.assertIn(Helix, shapes)
+ self.assertIn(Hexagon, shapes)
+
+ # prove that getArea works on the block level
+ self.assertAlmostEqual(b.getArea(cold=True), b.getArea(cold=False), delta=1e-10)
+
+ # prove that getArea preserves the sum of all the areas, even if there is a DerivedShape
+ totalAreaCold = sum([c.getArea(cold=True) for c in b])
+ totalAreaHot = sum([c.getArea(cold=False) for c in b])
+ self.assertAlmostEqual(totalAreaCold, totalAreaHot, delta=1e-10)
+
+
class TestCircle(TestShapedComponent):
"""Test circle shaped component."""
@@ -578,19 +604,17 @@ def test_getArea(self):
def test_componentInteractionsLinkingByDimensions(self):
"""Tests linking of Components by dimensions.
- .. test:: Show the dimensions of a liquid Component can be defined to depend on the solid Components that bound it.
+ The component ``gap``, representing the fuel-clad gap filled with Void, is defined with
+ dimensions that depend on the fuel outer diameter and clad inner diameter. The
+ :py:meth:`~armi.reactor.components.component.Component.resolveLinkedDims` method links the
+ gap dimensions appropriately when the Component is constructed, and the test shows the area
+ of the gap is calculated correctly based on the thermally-expanded dimensions of the fuel
+ and clad Components.
+
+ .. test:: Show the dimensions of a liquid Component can be defined to depend on the solid
+ Components that bound it.
:id: T_ARMI_COMP_FLUID1
:tests: R_ARMI_COMP_FLUID
-
- The component ``gap``, representing the fuel-clad gap filled with Void,
- is defined with dimensions that depend on the fuel outer diameter and
- clad inner diameter. The
- :py:meth:`~armi.reactor.components.component.Component.resolveLinkedDims`
- method links the gap dimensions appropriately when the Component is
- constructed, and the test shows the area of the gap is calculated
- correctly based on the thermally-expanded dimensions of the fuel and
- clad Components.
-
"""
nPins = 217
fuelDims = {"Tinput": 25.0, "Thot": 430.0, "od": 0.9, "id": 0.0, "mult": nPins}
@@ -684,6 +708,22 @@ def test_fuelMass(self):
self.component.p.flags = flags.Flags.MODERATOR
self.assertEqual(self.component.getFuelMass(), 0.0)
+ def test_theoreticalDensitySetter(self):
+ """Ensure only fraction theoretical densities are supported."""
+ self.assertEqual(self.component.p.theoreticalDensityFrac, 1)
+ with self.assertRaises(ValueError):
+ self.component.p.theoreticalDensityFrac = 2.0
+ self.assertEqual(self.component.p.theoreticalDensityFrac, 1)
+ self.component.p.theoreticalDensityFrac = 0.2
+ self.assertEqual(self.component.p.theoreticalDensityFrac, 0.2)
+ with self.assertRaises(ValueError):
+ self.component.p.theoreticalDensityFrac = -1.0
+ self.assertEqual(self.component.p.theoreticalDensityFrac, 0.2)
+ self.component.p.theoreticalDensityFrac = 1.0
+ self.assertEqual(self.component.p.theoreticalDensityFrac, 1)
+ self.component.p.theoreticalDensityFrac = 0.0
+ self.assertEqual(self.component.p.theoreticalDensityFrac, 0)
+
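A sketch of the setter validation this exercises: the theoretical density is a fraction, so assignments outside [0, 1] must be rejected while leaving the stored value untouched (a hypothetical minimal property, not ARMI's parameter system):

```python
class CompParams:
    """Hypothetical stand-in for a validated parameter collection."""

    def __init__(self):
        self._tdFrac = 1.0

    @property
    def theoreticalDensityFrac(self):
        return self._tdFrac

    @theoreticalDensityFrac.setter
    def theoreticalDensityFrac(self, value):
        if not 0.0 <= value <= 1.0:
            raise ValueError(f"theoretical density must be a fraction, got {value}")
        self._tdFrac = value


p = CompParams()
try:
    p.theoreticalDensityFrac = 2.0
except ValueError:
    pass
assert p.theoreticalDensityFrac == 1.0  # rejected assignment left the value unchanged
p.theoreticalDensityFrac = 0.2
assert p.theoreticalDensityFrac == 0.2
```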
class TestComponentExpansion(unittest.TestCase):
tCold = 20
@@ -770,23 +810,23 @@ def expansionConservationHotHeightDefined(self, mat: str, isotope: str):
circle.material.density(Tc=circle.temperatureInC),
)
- # brief 2D expansion with set temp to show mass is conserved
- # hot height would come from block value
+ # brief 2D expansion with set temp to show mass is conserved; hot height would come from
+ # the block value
warmMass = circle1.density() * circle1.getArea() * hotHeight
circle1.setTemperature(self.tHot)
hotMass = circle1.density() * circle1.getArea() * hotHeight
self.assertAlmostEqual(warmMass, hotMass)
circle1.setTemperature(self.tWarm)
- # Change temp to circle 2 temp to show equal to circle2
- # and then change back to show recoverable to original values
+ # Change temp to circle 2's temp to show it matches circle2, then change back to show the
+ # original values are recoverable
oldArea = circle1.getArea()
initialDens = circle1.density()
# when block.setHeight is called (which effectively changes component height)
- # component.setNumberDensity is called (for solid isotopes) to adjust the number
- # density so that now the 2D expansion will be approximated/expanded around
- # the hot temp which is akin to these adjustments
+ # component.setNumberDensity is called (for solid isotopes) to adjust the number density so
+ # that now the 2D expansion will be approximated/expanded around the hot temp, which is akin
+ # to these adjustments
heightFactor = circle1.getHeightFactor(self.tHot)
circle1.adjustDensityForHeightExpansion(self.tHot) # apply temp at new height
circle1.setTemperature(self.tHot)
@@ -1764,3 +1804,11 @@ def test_adjustMassEnrichment(self):
def test_getEnrichment(self):
self.fuel.adjustMassEnrichment(0.3)
self.assertAlmostEqual(self.fuel.getEnrichment(), 0.3)
+
+ def test_finalizeLoadDBAdjustsTD(self):
+ """Ensure component is fully loaded through finalize methods."""
+ tdFrac = 0.54321
+ comp = self.fuel
+ comp.p.theoreticalDensityFrac = tdFrac
+ comp.finalizeLoadingFromDB()
+ self.assertEqual(comp.material.getTD(), tdFrac)
diff --git a/armi/reactor/tests/test_composites.py b/armi/reactor/tests/test_composites.py
index bd5c65d6b..5a60cc5a1 100644
--- a/armi/reactor/tests/test_composites.py
+++ b/armi/reactor/tests/test_composites.py
@@ -14,6 +14,7 @@
"""Tests for the composite pattern."""
from copy import deepcopy
+import logging
import unittest
from armi import nuclearDataIO
@@ -34,7 +35,9 @@
from armi.reactor.composites import getReactionRateDict
from armi.reactor.flags import Flags, TypeSpec
from armi.reactor.tests.test_blocks import loadTestBlock
+from armi.reactor.tests.test_reactors import loadTestReactor
from armi.tests import ISOAA_PATH
+from armi.tests import mockRunLogs
class MockBP:
@@ -310,15 +313,105 @@ def test_expandLFPs(self):
self.assertEqual(len(numDens), 9)
self.assertEqual(numDens["MO99"], 0)
+ def test_setChildrenLumpedFissionProducts(self):
+ # build a lumped fission product collection
+ fpd = getDummyLFPFile()
+ lfps = fpd.createLFPsFromFile()
+
+ # validate that the LFP collection is None
+ self.container.setChildrenLumpedFissionProducts(None)
+ for c in self.container.getChildren():
+ self.assertIsNone(c._lumpedFissionProducts)
+
+ # validate that the LFP collection is not None
+ self.container.setChildrenLumpedFissionProducts(lfps)
+ for c in self.container.getChildren():
+ self.assertIsNotNone(c._lumpedFissionProducts)
+
+ def test_requiresLumpedFissionProducts(self):
+ # build a lumped fission product collection
+ fpd = getDummyLFPFile()
+ lfps = fpd.createLFPsFromFile()
+ self.container.setChildrenLumpedFissionProducts(lfps)
+
+ # test the null case
+ result = self.container.requiresLumpedFissionProducts(None)
+ self.assertFalse(result)
+
+ # test the usual case
+ result = self.container.requiresLumpedFissionProducts(set())
+ self.assertFalse(result)
+
+ # test a positive case
+ result = self.container.requiresLumpedFissionProducts(["LFP35"])
+ self.assertTrue(result)
+
+ def test_getLumpedFissionProductsIfNecessaryNullCase(self):
+ # build a lumped fission product collection
+ fpd = getDummyLFPFile()
+ lfps = fpd.createLFPsFromFile()
+ self.container.setChildrenLumpedFissionProducts(lfps)
+
+ # test the null case
+ result = self.container.getLumpedFissionProductsIfNecessary(None)
+ self.assertEqual(len(result), 0)
+
+ # test a positive case
+ result = self.container.getLumpedFissionProductsIfNecessary(["LFP35"])
+ self.assertGreater(len(result), 0)
+
def test_getIntegratedMgFlux(self):
mgFlux = self.container.getIntegratedMgFlux()
self.assertEqual(mgFlux, [0.0])
def test_getReactionRates(self):
+ # test the null case
rRates = self.container.getReactionRates("U235")
self.assertEqual(len(rRates), 6)
self.assertEqual(sum([r for r in rRates.values()]), 0)
+ # init reactor
+ _o, r = loadTestReactor(
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
+ lib = nuclearDataIO.isotxs.readBinary(ISOAA_PATH)
+ r.core.lib = lib
+
+ # test on a Component
+ b = r.core.getFirstAssembly().getFirstBlock()
+ b.p.mgFlux = 1
+ c = b.getComponents()[0]
+ rRatesComp = c.getReactionRates("U235")
+ self.assertEqual(len(rRatesComp), 6)
+ self.assertGreater(sum([r for r in rRatesComp.values()]), 0)
+
+ # test on a Block
+ rRatesBlock = b.getReactionRates("U235")
+ self.assertEqual(len(rRatesBlock), 6)
+ self.assertGreater(sum([r for r in rRatesBlock.values()]), 0)
+
+ # test on an Assembly
+ assem = r.core.getFirstAssembly()
+ rRatesAssem = assem.getReactionRates("U235")
+ self.assertEqual(len(rRatesAssem), 6)
+ self.assertGreater(sum([r for r in rRatesAssem.values()]), 0)
+
+ # test on a Core
+ rRatesCore = r.core.getReactionRates("U235")
+ self.assertEqual(len(rRatesCore), 6)
+ self.assertGreater(sum([r for r in rRatesCore.values()]), 0)
+
+ # test on a Reactor
+ rRatesReactor = r.getReactionRates("U235")
+ self.assertEqual(len(rRatesReactor), 6)
+ self.assertGreater(sum([r for r in rRatesReactor.values()]), 0)
+
+ # test that all different levels of the hierarchy have the same reaction rates
+ for key, val in rRatesBlock.items():
+ self.assertAlmostEqual(rRatesAssem[key], val)
+ self.assertAlmostEqual(rRatesCore[key], val)
+ self.assertAlmostEqual(rRatesReactor[key], val)
+
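The cross-level agreement asserted at the end follows because reaction rates are volume-integrated quantities: a parent's rate is the sum over its children, so with a single contributing block the same numbers appear at every level. A toy illustration of that invariant:

```python
# volume-integrated rates sum up the hierarchy (toy numbers, toy keys)
componentRates = [
    {"fis": 1.0, "nG": 2.0},
    {"fis": 0.5, "nG": 0.25},
]


def sumRates(children):
    total = {}
    for rates in children:
        for key, val in rates.items():
            total[key] = total.get(key, 0.0) + val
    return total


blockRates = sumRates(componentRates)
assemblyRates = sumRates([blockRates])  # a single-block assembly
assert assemblyRates == blockRates
```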
def test_syncParameters(self):
data = [{"serialNum": 123}, {"flags": "FAKE"}]
numSynced = self.container._syncParameters(data, {})
@@ -326,7 +419,6 @@ def test_syncParameters(self):
class TestCompositeTree(unittest.TestCase):
-
blueprintYaml = """
name: test assembly
height: [1, 1] # 2 blocks
@@ -386,13 +478,13 @@ class TestCompositeTree(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
- self.Block = None
+ self.block = None
self.r = None
def setUp(self):
- self.Block = loadTestBlock()
- self.r = self.Block.r
- self.Block.setHeight(100.0)
+ self.block = loadTestBlock()
+ self.r = self.block.core.r
+ self.block.setHeight(100.0)
self.refDict = {
"U235": 0.00275173784234,
"U238": 0.0217358415457,
@@ -404,28 +496,28 @@ def setUp(self):
"NA23": 2e-2,
"ZR": 0.00709003962772,
}
- self.Block.setNumberDensities(self.refDict)
+ self.block.setNumberDensities(self.refDict)
def test_ordering(self):
a = assemblies.Assembly("dummy")
- a.spatialGrid = grids.axialUnitGrid(2, armiObject=a)
- otherBlock = deepcopy(self.Block)
- a.add(self.Block)
+ a.spatialGrid = grids.AxialGrid.fromNCells(2, armiObject=a)
+ otherBlock = deepcopy(self.block)
+ a.add(self.block)
a.add(otherBlock)
- self.assertTrue(self.Block < otherBlock)
- locator = self.Block.spatialLocator
- self.Block.spatialLocator = otherBlock.spatialLocator
+ self.assertTrue(self.block < otherBlock)
+ locator = self.block.spatialLocator
+ self.block.spatialLocator = otherBlock.spatialLocator
otherBlock.spatialLocator = locator
- self.assertTrue(otherBlock < self.Block)
+ self.assertTrue(otherBlock < self.block)
def test_summing(self):
a = assemblies.Assembly("dummy")
- a.spatialGrid = grids.axialUnitGrid(2, armiObject=a)
- otherBlock = deepcopy(self.Block)
- a.add(self.Block)
+ a.spatialGrid = grids.AxialGrid.fromNCells(2, armiObject=a)
+ otherBlock = deepcopy(self.block)
+ a.add(self.block)
a.add(otherBlock)
- b = self.Block + otherBlock
+ b = self.block + otherBlock
self.assertEqual(len(b), 26)
self.assertFalse(b[0].is3D)
self.assertIn("Circle", str(b[0]))
@@ -439,8 +531,11 @@ def test_constituentReport(self):
runLog.info(self.r.core.getFirstBlock().getComponents()[0].constituentReport())
def test_getNuclides(self):
- """The getNuclides should return all keys that have ever been in this block, including values that are at trace."""
- cur = self.Block.getNuclides()
+ """
+ The getNuclides should return all keys that have ever been in this block, including values
+ that are at trace.
+ """
+ cur = self.block.getNuclides()
ref = self.refDict.keys()
for key in ref:
self.assertIn(key, cur)
@@ -448,9 +543,8 @@ def test_getNuclides(self):
def test_getFuelMass(self):
"""
- This test creates a dummy assembly and ensures that the assembly, block, and fuel component masses are
- consistent.
- `getFuelMass` ensures that the fuel component is used to `getMass`.
+ This test creates a dummy assembly and ensures that the assembly, block, and fuel component
+ masses are consistent. `getFuelMass` ensures that the fuel component is used to `getMass`.
"""
cs = settings.Settings()
assemDesign = assemblyBlueprint.AssemblyBlueprint.load(self.blueprintYaml)
@@ -464,18 +558,6 @@ def test_getFuelMass(self):
self.assertEqual(fuelMass, a.getFuelMass())
- def test_getNeutronEnergyDepositionConstants(self):
- """Until we improve test architecture, this test can not be more interesting."""
- with self.assertRaises(RuntimeError):
- # fails because this test reactor does not have a cross-section library
- _x = self.r.core.getNeutronEnergyDepositionConstants()
-
- def test_getGammaEnergyDepositionConstants(self):
- """Until we improve test architecture, this test can not be more interesting."""
- with self.assertRaises(RuntimeError):
- # fails because this test reactor does not have a cross-section library
- _x = self.r.core.getGammaEnergyDepositionConstants()
-
def test_getChildrenIncludeMaterials(self):
"""Test that the ``StateRetainer`` retains material properties when they are modified."""
cs = settings.Settings()
@@ -488,9 +570,9 @@ def test_getChildrenIncludeMaterials(self):
def test_getHMMass(self):
fuelDims = {"Tinput": 273.0, "Thot": 273.0, "od": 0.76, "id": 0.0, "mult": 1.0}
self.fuelComponent = components.Circle("fuel", "UZr", **fuelDims)
- self.Block.add(self.fuelComponent)
+ self.block.add(self.fuelComponent)
- self.Block.clearNumberDensities()
+ self.block.clearNumberDensities()
self.refDict = {
"U235": 0.00275173784234,
"U238": 0.0217358415457,
@@ -502,14 +584,14 @@ def test_getHMMass(self):
"NA23": 2e-2,
"ZR": 0.00709003962772,
}
- self.Block.setNumberDensities(self.refDict)
+ self.block.setNumberDensities(self.refDict)
- cur = self.Block.getHMMass()
+ cur = self.block.getHMMass()
mass = 0.0
for nucName in self.refDict.keys():
if nucDir.isHeavyMetal(nucName):
- mass += self.Block.getMass(nucName)
+ mass += self.block.getMass(nucName)
places = 6
self.assertAlmostEqual(cur, mass, places=places)
@@ -518,28 +600,28 @@ def test_getFPMass(self):
fuelDims = {"Tinput": 273.0, "Thot": 273.0, "od": 0.76, "id": 0.0, "mult": 1.0}
self.fuelComponent = components.Circle("fuel", "UZr", **fuelDims)
self.fuelComponent.material.setMassFrac("LFP38", 0.25)
- self.Block.add(self.fuelComponent)
+ self.block.add(self.fuelComponent)
refDict = {"LFP35": 0.1, "LFP38": 0.05, "LFP39": 0.7}
self.fuelComponent.setNumberDensities(refDict)
- cur = self.Block.getFPMass()
+ cur = self.block.getFPMass()
mass = 0.0
for nucName in refDict.keys():
- mass += self.Block.getMass(nucName)
+ mass += self.block.getMass(nucName)
ref = mass
places = 6
self.assertAlmostEqual(cur, ref, places=places)
def test_getFissileMass(self):
- cur = self.Block.getFissileMass()
+ cur = self.block.getFissileMass()
mass = 0.0
for nucName in self.refDict.keys():
if nucName in nuclideBases.NuclideBase.fissile:
- mass += self.Block.getMass(nucName)
+ mass += self.block.getMass(nucName)
ref = mass
places = 6
@@ -552,12 +634,12 @@ def test_getMaxParam(self):
:id: T_ARMI_CMP_PARAMS0
:tests: R_ARMI_CMP_PARAMS
"""
- for ci, c in enumerate(self.Block):
+ for ci, c in enumerate(self.block):
if isinstance(c, basicShapes.Circle):
c.p.id = ci
lastSeen = c
lastIndex = ci
- cMax, comp = self.Block.getMaxParam("id", returnObj=True)
+ cMax, comp = self.block.getMaxParam("id", returnObj=True)
self.assertEqual(cMax, lastIndex)
self.assertIs(comp, lastSeen)
@@ -568,12 +650,12 @@ def test_getMinParam(self):
:id: T_ARMI_CMP_PARAMS1
:tests: R_ARMI_CMP_PARAMS
"""
- for ci, c in reversed(list(enumerate(self.Block))):
+ for ci, c in reversed(list(enumerate(self.block))):
if isinstance(c, basicShapes.Circle):
c.p.id = ci
lastSeen = c
lastIndex = ci
- cMax, comp = self.Block.getMinParam("id", returnObj=True)
+ cMax, comp = self.block.getMinParam("id", returnObj=True)
self.assertEqual(cMax, lastIndex)
self.assertIs(comp, lastSeen)
@@ -612,10 +694,19 @@ def test_flagSerialization(self):
# missing flags in current version Flags
attrs["flag_order"].append("NONEXISTANTFLAG")
- with self.assertRaises(ValueError):
+ with mockRunLogs.BufferLog() as mock:
+ self.assertEqual("", mock.getStdout())
+ testName = "test_flagSerialization"
+ runLog.LOG.startLog(testName)
+ runLog.LOG.setVerbosity(logging.WARNING)
+
data2 = composites.FlagSerializer.unpack(
flagsArray, composites.FlagSerializer.version, attrs
)
+ flagLog = mock.getStdout()
+
+ self.assertIn("The set of flags", flagLog)
+ self.assertIn("NONEXISTANTFLAG", flagLog)
def test_flagConversion(self):
data = [
@@ -760,10 +851,6 @@ def test_dimensionReport(self):
report = self.obj.setComponentDimensionsReport()
self.assertEqual(len(report), len(self.obj))
- def test_printDensities(self):
- lines = self.obj.printDensities()
- self.assertEqual(len(lines), len(self.obj.getNuclides()))
-
def test_getAtomicWeight(self):
weight = self.obj.getAtomicWeight()
self.assertTrue(50 < weight < 100)
diff --git a/armi/reactor/tests/test_flags.py b/armi/reactor/tests/test_flags.py
index bbc4c1cb8..7155cae95 100644
--- a/armi/reactor/tests/test_flags.py
+++ b/armi/reactor/tests/test_flags.py
@@ -38,6 +38,14 @@ def test_flagsToAndFromString(self):
self.assertEqual(flags.Flags.toString(f), "FUEL")
self.assertEqual(f, flags.Flags.fromString("FUEL"))
+ def test_toStringAlphabetical(self):
+ """Ensure that, for multiple flags, toString() returns them in alphabetical order."""
+ flagz = flags.Flags.AXIAL | flags.Flags.LOWER
+ self.assertEqual(flags.Flags.toString(flagz), "AXIAL LOWER")
+
+ flagz = flags.Flags.LOWER | flags.Flags.AXIAL
+ self.assertEqual(flags.Flags.toString(flagz), "AXIAL LOWER")
+
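Sorting makes the string form of a multi-flag value independent of construction order, which keeps serialized flags deterministic. A sketch of how such a toString can be made order-independent:

```python
def flagsToString(names):
    # sort so {"AXIAL", "LOWER"} and {"LOWER", "AXIAL"} serialize identically
    return " ".join(sorted(names))


assert flagsToString({"AXIAL", "LOWER"}) == "AXIAL LOWER"
assert flagsToString({"LOWER", "AXIAL"}) == "AXIAL LOWER"
```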
def test_fromStringStrict(self):
self._help_fromString(flags.Flags.fromString)
with self.assertRaises(flags.InvalidFlagsError):
diff --git a/armi/reactor/tests/test_parameters.py b/armi/reactor/tests/test_parameters.py
index e2068e8ae..38f47141b 100644
--- a/armi/reactor/tests/test_parameters.py
+++ b/armi/reactor/tests/test_parameters.py
@@ -456,10 +456,18 @@ class MockPC(parameters.ParameterCollection):
self.assertEqual(p2.categories, set(["awesome", "stuff", "bacon"]))
self.assertEqual(p3.categories, set(["bacon"]))
+ for p in [p1, p2, p3]:
+ self._testCategoryConsistency(p)
+
self.assertEqual(set(pc.paramDefs.inCategory("awesome")), set([p1, p2]))
self.assertEqual(set(pc.paramDefs.inCategory("stuff")), set([p1, p2]))
self.assertEqual(set(pc.paramDefs.inCategory("bacon")), set([p2, p3]))
+ def _testCategoryConsistency(self, p: parameters.Parameter):
+ for category in p.categories:
+ self.assertTrue(p.hasCategory(category))
+ self.assertFalse(p.hasCategory("this_shouldnot_exist"))
+
def test_parameterCollectionsHave__slots__(self):
"""Tests we prevent accidental creation of attributes."""
self.assertEqual(
@@ -502,3 +510,80 @@ class MockPCChild(MockPC):
pcc = MockPCChild()
with self.assertRaises(AssertionError):
pcc.whatever = 33
+
+
+class ParamCollectionWhere(unittest.TestCase):
+ """Tests for ParameterCollection.where."""
+
+ class ScopeParamCollection(parameters.ParameterCollection):
+ pDefs = parameters.ParameterDefinitionCollection()
+ with pDefs.createBuilder() as pb:
+ pb.defParam(
+ name="empty",
+ description="Bare",
+ location=None,
+ categories=None,
+ units="",
+ )
+ pb.defParam(
+ name="keff",
+ description="keff",
+ location=parameters.ParamLocation.VOLUME_INTEGRATED,
+ categories=[parameters.Category.neutronics],
+ units="",
+ )
+ pb.defParam(
+ name="cornerFlux",
+ description="corner flux",
+ location=parameters.ParamLocation.CORNERS,
+ categories=[
+ parameters.Category.neutronics,
+ ],
+ units="",
+ )
+ pb.defParam(
+ name="edgeTemperature",
+ description="edge temperature",
+ location=parameters.ParamLocation.EDGES,
+ categories=[parameters.Category.thermalHydraulics],
+ units="",
+ )
+
+ @classmethod
+ def setUpClass(cls) -> None:
+ """Define a couple useful parameters with categories, locations, etc."""
+ cls.pc = cls.ScopeParamCollection()
+
+ def test_onCategory(self):
+ """Test the use of Parameter.hasCategory on filtering."""
+ names = {"keff", "cornerFlux"}
+ for p in self.pc.where(
+ lambda pd: pd.hasCategory(parameters.Category.neutronics)
+ ):
+ self.assertTrue(p.hasCategory(parameters.Category.neutronics), msg=p)
+ names.remove(p.name)
+ self.assertFalse(names, msg=f"{names=} should be empty!")
+
+ def test_onLocation(self):
+ """Test the use of Parameter.atLocation in filtering."""
+ names = {"edgeTemperature"}
+ for p in self.pc.where(
+ lambda pd: pd.atLocation(parameters.ParamLocation.EDGES)
+ ):
+ self.assertTrue(p.atLocation(parameters.ParamLocation.EDGES), msg=p)
+ names.remove(p.name)
+ self.assertFalse(names, msg=f"{names=} should be empty!")
+
+ def test_complicated(self):
+ """Test a multi-condition filter."""
+ names = {"cornerFlux"}
+
+ def check(p: parameters.Parameter) -> bool:
+ return p.atLocation(parameters.ParamLocation.CORNERS) and p.hasCategory(
+ parameters.Category.neutronics
+ )
+
+ for p in self.pc.where(check):
+ self.assertTrue(check(p), msg=p)
+ names.remove(p.name)
+ self.assertFalse(names, msg=f"{names=} should be empty")
diff --git a/armi/reactor/tests/test_reactors.py b/armi/reactor/tests/test_reactors.py
index 2052a8b1b..b84903d04 100644
--- a/armi/reactor/tests/test_reactors.py
+++ b/armi/reactor/tests/test_reactors.py
@@ -72,7 +72,7 @@ def buildOperatorOfEmptyHexBlocks(customSettings=None):
o.initializeInterfaces(r)
a = assemblies.HexAssembly("fuel")
- a.spatialGrid = grids.axialUnitGrid(1)
+ a.spatialGrid = grids.AxialGrid.fromNCells(1)
b = blocks.HexBlock("TestBlock")
b.setType("fuel")
dims = {"Tinput": 600, "Thot": 600, "op": 16.0, "ip": 1, "mult": 1}
@@ -109,7 +109,7 @@ def buildOperatorOfEmptyCartesianBlocks(customSettings=None):
o.initializeInterfaces(r)
a = assemblies.CartesianAssembly("fuel")
- a.spatialGrid = grids.axialUnitGrid(1)
+ a.spatialGrid = grids.AxialGrid.fromNCells(1)
b = blocks.CartesianBlock("TestBlock")
b.setType("fuel")
dims = {
@@ -384,6 +384,16 @@ def test_getAllXsSuffixes(self):
expectedSuffixes = ["AA"]
self.assertListEqual(expectedSuffixes, actualSuffixes)
+ def test_genBlocksByLocName(self):
+ self.r.core.genBlocksByLocName()
+ self.assertGreater(len(self.r.core.blocksByLocName), 300)
+ self.assertIn("009-009-004", self.r.core.blocksByLocName)
+
+ def test_setPitchUniform(self):
+ self.r.core.setPitchUniform(0.0)
+ for b in self.r.core.getBlocks():
+ self.assertEqual(b.getPitch(), 0.0)
+
def test_countBlocksOfType(self):
numControlBlocks = self.r.core.countBlocksWithFlags([Flags.DUCT, Flags.CONTROL])
@@ -824,8 +834,6 @@ def test_getDominantMaterial(self):
self.assertEqual(dominantFuel.getName(), "UZr")
self.assertEqual(dominantCool.getName(), "Sodium")
- self.assertEqual(list(dominantCool.getNuclides()), ["NA23"])
-
def test_getSymmetryFactor(self):
"""
Test getSymmetryFactor().
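Note the grid-construction migration in the two builder functions above: the module-level `grids.axialUnitGrid()` is replaced by a classmethod. A one-line sketch of the change, per the diff:

```python
from armi.reactor import grids

# Before: a.spatialGrid = grids.axialUnitGrid(1)
# After:
spatialGrid = grids.AxialGrid.fromNCells(1)
```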
diff --git a/armi/reactor/tests/test_rz_reactors.py b/armi/reactor/tests/test_rz_reactors.py
index 8f46627b2..a3b86cad5 100644
--- a/armi/reactor/tests/test_rz_reactors.py
+++ b/armi/reactor/tests/test_rz_reactors.py
@@ -18,8 +18,8 @@
import unittest
from armi import settings
-from armi.tests import TEST_ROOT
from armi.reactor import reactors
+from armi.tests import TEST_ROOT
class TestRZTReactor(unittest.TestCase):
@@ -48,12 +48,11 @@ def test_loadRZT_reactor(self):
"""
The Godiva benchmark model is a HEU sphere with a radius of 8.74 cm.
- This unit tests loading and verifies the reactor is loaded correctly by
- comparing volumes against expected volumes for full core (including
- void boundary conditions) and just the fuel.
+ This test loads the reactor and verifies it loaded correctly by comparing volumes against
+ expected volumes for the full core (including void boundary conditions) and just the fuel.
"""
cs = settings.Settings(
- fName=os.path.join(TEST_ROOT, "Godiva.armi.unittest.yaml")
+ fName=os.path.join(TEST_ROOT, "godiva", "godiva.armi.unittest.yaml")
)
r = reactors.loadFromCs(cs)
@@ -69,7 +68,7 @@ def test_loadRZT_reactor(self):
for b in r.core.getBlocks():
reactorVolumes.append(b.getVolume())
for c in b:
- if "Godiva" in c.name:
+ if "godiva" in c.name:
fuelVolumes.append(c.getVolume())
# verify the total reactor volume is as expected
tolerance = 1e-3
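For reference, the load pattern this test exercises, with the relocated settings file (paths from the diff; `TEST_ROOT` is ARMI's test-resource root):

```python
import os

from armi import settings
from armi.reactor import reactors
from armi.tests import TEST_ROOT

cs = settings.Settings(
    fName=os.path.join(TEST_ROOT, "godiva", "godiva.armi.unittest.yaml")
)
r = reactors.loadFromCs(cs)
```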
diff --git a/armi/resources/burn-chain.yaml b/armi/resources/burn-chain.yaml
index 8049061dd..a51ae65f6 100644
--- a/armi/resources/burn-chain.yaml
+++ b/armi/resources/burn-chain.yaml
@@ -1,1221 +1,1221 @@
AM241:
-- transmutation:
- branch: 1.0
- products:
- - PU240
- type: n2n
-- transmutation:
- branch: 0.1384
- products:
- - PU242
- type: nGamma
-- transmutation:
- branch: 0.6616
- products:
- - CM242
- - DUMP2
- type: nGamma
-- transmutation:
- branch: 0.2
- products:
- - AM242M
- - DUMP2
- type: nGamma
-- transmutation:
- branch: 1.0
- products:
- - LFP41
- type: fission
-# add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)
-- transmutation:
- branch: 1.6500e-04
- products:
- - H3
- - DUMP1
- type: fission
-- decay:
- branch: 1.0
- products:
- - NP237
- type: ad
-- decay:
- branch: 4.120055e-12
- products:
- - LFP41
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - PU240
+ type: n2n
+ - transmutation:
+ branch: 0.1384
+ products:
+ - PU242
+ type: nGamma
+ - transmutation:
+ branch: 0.6616
+ products:
+ - CM242
+ - DUMP2
+ type: nGamma
+ - transmutation:
+ branch: 0.2
+ products:
+ - AM242M
+ - DUMP2
+ type: nGamma
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP41
+ type: fission
+ # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)
+ - transmutation:
+ branch: 1.6500e-04
+ products:
+ - H3
+ - DUMP1
+ type: fission
+ - decay:
+ branch: 1.0
+ products:
+ - NP237
+ type: ad
+ - decay:
+ branch: 4.120055e-12
+ products:
+ - LFP41
+ type: sf
AM242G:
-- transmutation:
- branch: 1.0
- products:
- - AM241
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - LFP41
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - AM243
- type: nGamma
-- decay:
- branch: 0.173
- products:
- - PU242
- type: ec
-- decay:
- branch: 0.827
- products:
- - CM242
- type: bmd
+ - transmutation:
+ branch: 1.0
+ products:
+ - AM241
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP41
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - AM243
+ type: nGamma
+ - decay:
+ branch: 0.173
+ products:
+ - PU242
+ type: ec
+ - decay:
+ branch: 0.827
+ products:
+ - CM242
+ type: bmd
AM242M:
-- transmutation:
- branch: 1.0
- products:
- - AM241
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - LFP41
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - AM243
- type: nGamma
-- decay:
- branch: 0.822865
- products:
- - PU242
- type: ec
-- decay:
- branch: 0.172135
- products:
- - CM242
- type: bmd
-- decay:
- branch: 0.005
- products:
- - NP238
- type: ad
+ - transmutation:
+ branch: 1.0
+ products:
+ - AM241
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP41
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - AM243
+ type: nGamma
+ - decay:
+ branch: 0.822865
+ products:
+ - PU242
+ type: ec
+ - decay:
+ branch: 0.172135
+ products:
+ - CM242
+ type: bmd
+ - decay:
+ branch: 0.005
+ products:
+ - NP238
+ type: ad
AM243:
-- transmutation:
- branch: 0.5
- products:
- - AM242M
- type: n2n
-- transmutation:
- branch: 0.0865
- products:
- - CM242
- type: n2n
-- transmutation:
- branch: 0.4135
- products:
- - PU242
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - LFP41
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - CM244
- type: nGamma
-- decay:
- branch: 1.0
- products:
- - NP239
- - PU239
- type: ad
+ - transmutation:
+ branch: 0.5
+ products:
+ - AM242M
+ type: n2n
+ - transmutation:
+ branch: 0.0865
+ products:
+ - CM242
+ type: n2n
+ - transmutation:
+ branch: 0.4135
+ products:
+ - PU242
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP41
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - CM244
+ type: nGamma
+ - decay:
+ branch: 1.0
+ products:
+ - NP239
+ - PU239
+ type: ad
B10:
-- transmutation:
- branch: 1.0
- products:
- - B11
- type: nGamma
-- transmutation:
- branch: 1.0
- products:
- - LI7
- - DUMP1
- type: nalph
-- transmutation:
- branch: 1.0
- products:
- - DUMP1
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - BE9
- - DUMP1
- type: nd
-- transmutation:
- branch: 1.0
- products:
- - BE10
- - DUMP1
- type: np
+ - transmutation:
+ branch: 1.0
+ products:
+ - B11
+ type: nGamma
+ - transmutation:
+ branch: 1.0
+ products:
+ - LI7
+ - DUMP1
+ type: nalph
+ - transmutation:
+ branch: 1.0
+ products:
+ - DUMP1
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - BE9
+ - DUMP1
+ type: nd
+ - transmutation:
+ branch: 1.0
+ products:
+ - BE10
+ - DUMP1
+ type: np
B11:
-- transmutation:
- branch: 1.0
- products:
- - DUMP1
- type: nGamma
-- transmutation:
- # n-alphas to Li-8 -> Be-8 -> 2 alphas
- branch: 1.0
- products:
- - HE4
- - DUMP1
- type: nalph
- productParticle: HE4
-- transmutation:
- branch: 1.0
- products:
- - B10
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - BE9
- - DUMP1
- type: nt
+ - transmutation:
+ branch: 1.0
+ products:
+ - DUMP1
+ type: nGamma
+ - transmutation:
+ # n-alphas to Li-8 -> Be-8 -> 2 alphas
+ branch: 1.0
+ products:
+ - HE4
+ - DUMP1
+ type: nalph
+ productParticle: HE4
+ - transmutation:
+ branch: 1.0
+ products:
+ - B10
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - BE9
+ - DUMP1
+ type: nt
BE9:
-- transmutation:
- branch: 1.0
- products:
- - LI6
- - DUMP1
- type: nalph
-- transmutation:
- branch: 1.0
- products:
- - LI7
- - DUMP1
- type: nt
+ - transmutation:
+ branch: 1.0
+ products:
+ - LI6
+ - DUMP1
+ type: nalph
+ - transmutation:
+ branch: 1.0
+ products:
+ - LI7
+ - DUMP1
+ type: nt
BK249:
-- transmutation:
- branch: 1.0
- products:
- - CM244
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - LFP41
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - CF250
- type: nGamma
-- decay:
- branch: 1.0
- products:
- - CF249
- type: bmd
-- decay:
- branch: 4.755215e-10
- products:
- - LFP41
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - CM244
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP41
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - CF250
+ type: nGamma
+ - decay:
+ branch: 1.0
+ products:
+ - CF249
+ type: bmd
+ - decay:
+ branch: 4.755215e-10
+ products:
+ - LFP41
+ type: sf
CF249:
-- transmutation:
- branch: 1.0
- products:
- - CM244
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - LFP41
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - CF250
- type: nGamma
-- decay:
- branch: 1.0
- products:
- - CM245
- type: ad
-- decay:
- branch: 5.00000e-09
- products:
- - LFP41
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - CM244
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP41
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - CF250
+ type: nGamma
+ - decay:
+ branch: 1.0
+ products:
+ - CM245
+ type: ad
+ - decay:
+ branch: 5.00000e-09
+ products:
+ - LFP41
+ type: sf
CF250:
-- transmutation:
- branch: 1.0
- products:
- - CF249
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - LFP41
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - CF251
- type: nGamma
-- decay:
- branch: 1.0
- products:
- - CM246
- type: ad
-- decay:
- branch: 7.70000e-04
- products:
- - LFP41
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - CF249
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP41
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - CF251
+ type: nGamma
+ - decay:
+ branch: 1.0
+ products:
+ - CM246
+ type: ad
+ - decay:
+ branch: 7.70000e-04
+ products:
+ - LFP41
+ type: sf
CF251:
-- transmutation:
- branch: 1.0
- products:
- - CF250
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - LFP41
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - CF252
- type: nGamma
-- decay:
- branch: 1.0
- products:
- - CM247
- type: ad
+ - transmutation:
+ branch: 1.0
+ products:
+ - CF250
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP41
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - CF252
+ type: nGamma
+ - decay:
+ branch: 1.0
+ products:
+ - CM247
+ type: ad
CF252:
-- transmutation:
- branch: 1.0
- products:
- - CF251
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - LFP41
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - DUMP2
- type: nGamma
-- decay:
- branch: 9.69080e-01
- products:
- - CM248
- type: ad
-- decay:
- branch: 3.093567e-02
- products:
- - LFP41
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - CF251
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP41
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - DUMP2
+ type: nGamma
+ - decay:
+ branch: 9.69080e-01
+ products:
+ - CM248
+ type: ad
+ - decay:
+ branch: 3.093567e-02
+ products:
+ - LFP41
+ type: sf
CM242:
-- transmutation:
- branch: 0.99
- products:
- - AM241
- type: n2n
-- transmutation:
- branch: 0.01
- products:
- - NP237
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - LFP41
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - CM243
- type: nGamma
-- decay:
- branch: 1.0
- products:
- - PU238
- type: ad
-- decay:
- branch: 6.794544e-08
- products:
- - LFP41
- type: sf
+ - transmutation:
+ branch: 0.99
+ products:
+ - AM241
+ type: n2n
+ - transmutation:
+ branch: 0.01
+ products:
+ - NP237
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP41
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - CM243
+ type: nGamma
+ - decay:
+ branch: 1.0
+ products:
+ - PU238
+ type: ad
+ - decay:
+ branch: 6.794544e-08
+ products:
+ - LFP41
+ type: sf
CM243:
-- transmutation:
- branch: 1.0
- products:
- - CM242
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - CM244
- type: nGamma
-- transmutation:
- branch: 1.0
- products:
- - LFP41
- type: fission
-- decay:
- branch: 0.9971
- products:
- - PU239
- type: ad
-- decay:
- branch: 0.0029
- products:
- - AM243
- type: ec
-- decay:
- branch: 5.30000e-11
- products:
- - LFP41
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - CM242
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - CM244
+ type: nGamma
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP41
+ type: fission
+ - decay:
+ branch: 0.9971
+ products:
+ - PU239
+ type: ad
+ - decay:
+ branch: 0.0029
+ products:
+ - AM243
+ type: ec
+ - decay:
+ branch: 5.30000e-11
+ products:
+ - LFP41
+ type: sf
CM244:
-- transmutation:
- branch: 1.0
- products:
- - CM243
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - LFP41
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - CM245
- type: nGamma
-- decay:
- branch: 1.0
- products:
- - PU240
- type: ad
-- decay:
- branch: 1.340741e-06
- products:
- - LFP41
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - CM243
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP41
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - CM245
+ type: nGamma
+ - decay:
+ branch: 1.0
+ products:
+ - PU240
+ type: ad
+ - decay:
+ branch: 1.340741e-06
+ products:
+ - LFP41
+ type: sf
CM245:
-- transmutation:
- branch: 1.0
- products:
- - CM244
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - LFP41
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - CM246
- type: nGamma
-- decay:
- branch: 1.0
- products:
- - PU241
- type: ad
-- decay:
- branch: 6.10000e-09
- products:
- - LFP41
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - CM244
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP41
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - CM246
+ type: nGamma
+ - decay:
+ branch: 1.0
+ products:
+ - PU241
+ type: ad
+ - decay:
+ branch: 6.10000e-09
+ products:
+ - LFP41
+ type: sf
CM246:
-- transmutation:
- branch: 1.0
- products:
- - CM245
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - LFP41
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - CM247
- type: nGamma
-- decay:
- branch: 1.0
- products:
- - PU242
- type: ad
-- decay:
- branch: 2.61500e-04
- products:
- - LFP41
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - CM245
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP41
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - CM247
+ type: nGamma
+ - decay:
+ branch: 1.0
+ products:
+ - PU242
+ type: ad
+ - decay:
+ branch: 2.61500e-04
+ products:
+ - LFP41
+ type: sf
CM247:
-- transmutation:
- branch: 1.0
- products:
- - CM246
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - LFP41
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - CM248
- - DUMP2
- type: nGamma
-- decay:
- branch: 1.0
- products:
- - AM243
- type: ad
+ - transmutation:
+ branch: 1.0
+ products:
+ - CM246
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP41
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - CM248
+ - DUMP2
+ type: nGamma
+ - decay:
+ branch: 1.0
+ products:
+ - AM243
+ type: ad
CM248:
-- transmutation:
- branch: 1.0
- products:
- - CM247
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - LFP41
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - BK249
- type: nGamma
-- decay:
- branch: 0.9161
- products:
- - DUMP2
- type: ad
-- decay:
- branch: 8.39000e-02
- products:
- - LFP41
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - CM247
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP41
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - BK249
+ type: nGamma
+ - decay:
+ branch: 0.9161
+ products:
+ - DUMP2
+ type: ad
+ - decay:
+ branch: 8.39000e-02
+ products:
+ - LFP41
+ type: sf
H3:
-- decay:
- branch: 1.0
- products:
- - HE3
- - DUMP1
- type: bmd
+ - decay:
+ branch: 1.0
+ products:
+ - HE3
+ - DUMP1
+ type: bmd
HE3:
-- transmutation:
- branch: 1.0
- products:
- - HE4
- type: nGamma
-- transmutation:
- branch: 1.0
- products:
- - H3
- type: np
+ - transmutation:
+ branch: 1.0
+ products:
+ - HE4
+ type: nGamma
+ - transmutation:
+ branch: 1.0
+ products:
+ - H3
+ type: np
HE4: []
IN113:
-- transmutation:
- branch: 0.995
- products:
- - SN114
- - DUMP1
- type: nGamma
-- transmutation:
- branch: 0.005
- products:
- - CD114
- - DUMP1
- type: nGamma
+ - transmutation:
+ branch: 0.995
+ products:
+ - SN114
+ - DUMP1
+ type: nGamma
+ - transmutation:
+ branch: 0.005
+ products:
+ - CD114
+ - DUMP1
+ type: nGamma
IN115:
-- transmutation:
- branch: 0.9997
- products:
- - SN116
- - DUMP1
- type: nGamma
-- transmutation:
- branch: 0.0003
- products:
- - CD116
- - DUMP1
- type: nGamma
+ - transmutation:
+ branch: 0.9997
+ products:
+ - SN116
+ - DUMP1
+ type: nGamma
+ - transmutation:
+ branch: 0.0003
+ products:
+ - CD116
+ - DUMP1
+ type: nGamma
LI6:
-- transmutation:
- branch: 1.0
- products:
- - LI7
- type: nGamma
-- transmutation:
- branch: 1.0
- products:
- - HE4
- - DUMP1
- type: nt
+ - transmutation:
+ branch: 1.0
+ products:
+ - LI7
+ type: nGamma
+ - transmutation:
+ branch: 1.0
+ products:
+ - HE4
+ - DUMP1
+ type: nt
LI7:
-# LI7 n,gammas to Be8 which splits into two alphas, so we model both here by setting the productParticle to HE4
-- transmutation:
- branch: 1.0
- products:
- - HE4
- - DUMP1
- type: nGamma
- productParticle: HE4
-- transmutation:
- branch: 1.0
- products:
- - LI6
- type: n2n
+ # LI7 n,gammas to Be8 which splits into two alphas, so we model both here by setting the productParticle to HE4
+ - transmutation:
+ branch: 1.0
+ products:
+ - HE4
+ - DUMP1
+ type: nGamma
+ productParticle: HE4
+ - transmutation:
+ branch: 1.0
+ products:
+ - LI6
+ type: n2n
NP237:
-- transmutation:
- branch: 1.0
- products:
- - LFP38
- type: fission
-# add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)
-- transmutation:
- branch: 1.2500e-04
- products:
- - H3
- - DUMP1
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - NP238
- - PU238
- type: nGamma
-- transmutation:
- branch: 0.346
- products:
- - PU236
- type: n2n
-- transmutation:
- branch: 0.374
- products:
- - U236
- type: n2n
-- transmutation:
- branch: 0.28
- products:
- - DUMP2
- type: n2n
-- decay:
- branch: 1.0
- products:
- - PA233
- - DUMP2
- type: ad
-- decay:
- branch: 2.139954e-12
- products:
- - DUMP1
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP38
+ type: fission
+ # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)
+ - transmutation:
+ branch: 1.2500e-04
+ products:
+ - H3
+ - DUMP1
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - NP238
+ - PU238
+ type: nGamma
+ - transmutation:
+ branch: 0.346
+ products:
+ - PU236
+ type: n2n
+ - transmutation:
+ branch: 0.374
+ products:
+ - U236
+ type: n2n
+ - transmutation:
+ branch: 0.28
+ products:
+ - DUMP2
+ type: n2n
+ - decay:
+ branch: 1.0
+ products:
+ - PA233
+ - DUMP2
+ type: ad
+ - decay:
+ branch: 2.139954e-12
+ products:
+ - DUMP1
+ type: sf
NP238:
-- transmutation:
- branch: 1.0
- products:
- - LFP38
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - NP239
- - PU239
- type: nGamma
-- transmutation:
- branch: 1.0
- products:
- - NP237
- type: n2n
-- decay:
- branch: 1.0
- products:
- - PU238
- type: bmd
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP38
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - NP239
+ - PU239
+ type: nGamma
+ - transmutation:
+ branch: 1.0
+ products:
+ - NP237
+ type: n2n
+ - decay:
+ branch: 1.0
+ products:
+ - PU238
+ type: bmd
PA231:
-- transmutation:
- branch: 1.0
- products:
- - U232
- type: nGamma
-- transmutation:
- branch: 1.0
- products:
- - DUMP2
- type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - U232
+ type: nGamma
+ - transmutation:
+ branch: 1.0
+ products:
+ - DUMP2
+ type: n2n
PA233:
-- transmutation:
- branch: 1.0
- products:
- - U234
- type: nGamma
-- transmutation:
- branch: 1.0
- products:
- - LFP35
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - DUMP2
- type: n2n
-- decay:
- branch: 1.0
- products:
- - U233
- type: bmd
+ - transmutation:
+ branch: 1.0
+ products:
+ - U234
+ type: nGamma
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP35
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - DUMP2
+ type: n2n
+ - decay:
+ branch: 1.0
+ products:
+ - U233
+ type: bmd
PU236:
-- transmutation:
- branch: 1.0
- products:
- - NP237
- type: nGamma
-- transmutation:
- branch: 1.0
- products:
- - LFP35
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - DUMP2
- type: n2n
-- decay:
- branch: 1.0
- products:
- - U232
- - DUMP2
- type: ad
-- decay:
- branch: 1.90000e-09
- products:
- - LFP38
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - NP237
+ type: nGamma
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP35
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - DUMP2
+ type: n2n
+ - decay:
+ branch: 1.0
+ products:
+ - U232
+ - DUMP2
+ type: ad
+ - decay:
+ branch: 1.90000e-09
+ products:
+ - LFP38
+ type: sf
PU238:
-- transmutation:
- branch: 1.0
- products:
- - LFP38
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - PU239
- type: nGamma
-- transmutation:
- branch: 1.0
- products:
- - NP237
- type: n2n
-- decay:
- branch: 1.0
- products:
- - U234
- type: ad
-- decay:
- branch: 1.838574e-09
- products:
- - LFP38
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP38
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - PU239
+ type: nGamma
+ - transmutation:
+ branch: 1.0
+ products:
+ - NP237
+ type: n2n
+ - decay:
+ branch: 1.0
+ products:
+ - U234
+ type: ad
+ - decay:
+ branch: 1.838574e-09
+ products:
+ - LFP38
+ type: sf
PU239:
-- transmutation:
- branch: 1.0
- products:
- - PU238
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - LFP39
- type: fission
-# add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)
-- transmutation:
- branch: 1.4200e-04
- products:
- - H3
- - DUMP1
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - PU240
- type: nGamma
-- decay:
- branch: 1.0
- products:
- - U235
- type: ad
-- decay:
- branch: 4.399635e-12
- products:
- - LFP39
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - PU238
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP39
+ type: fission
+ # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)
+ - transmutation:
+ branch: 1.4200e-04
+ products:
+ - H3
+ - DUMP1
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - PU240
+ type: nGamma
+ - decay:
+ branch: 1.0
+ products:
+ - U235
+ type: ad
+ - decay:
+ branch: 4.399635e-12
+ products:
+ - LFP39
+ type: sf
PU240:
-- transmutation:
- branch: 1.0
- products:
- - PU239
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - LFP40
- type: fission
-# add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)
-- transmutation:
- branch: 1.9179e-04
- products:
- - H3
- - DUMP1
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - PU241
- type: nGamma
-- decay:
- branch: 1.0
- products:
- - U236
- type: ad
-- decay:
- branch: 5.656034e-08
- products:
- - LFP40
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - PU239
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP40
+ type: fission
+ # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)
+ - transmutation:
+ branch: 1.9179e-04
+ products:
+ - H3
+ - DUMP1
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - PU241
+ type: nGamma
+ - decay:
+ branch: 1.0
+ products:
+ - U236
+ type: ad
+ - decay:
+ branch: 5.656034e-08
+ products:
+ - LFP40
+ type: sf
PU241:
-- transmutation:
- branch: 1.0
- products:
- - PU240
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - LFP41
- type: fission
-# add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)
-- transmutation:
- branch: 1.4100e-04
- products:
- - H3
- - DUMP1
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - PU242
- type: nGamma
-- decay:
- branch: 1.0
- products:
- - AM241
- type: bmd
-- decay:
- branch: 5.729878e-15
- products:
- - LFP41
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - PU240
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP41
+ type: fission
+ # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)
+ - transmutation:
+ branch: 1.4100e-04
+ products:
+ - H3
+ - DUMP1
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - PU242
+ type: nGamma
+ - decay:
+ branch: 1.0
+ products:
+ - AM241
+ type: bmd
+ - decay:
+ branch: 5.729878e-15
+ products:
+ - LFP41
+ type: sf
PU242:
-- transmutation:
- branch: 1.0
- products:
- - PU241
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - LFP41
- type: fission
-# add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)
-- transmutation:
- branch: 1.6348e-04
- products:
- - H3
- - DUMP1
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - AM243
- type: nGamma
-- decay:
- branch: 1.0
- products:
- - U238
- type: ad
-- decay:
- branch: 5.482456e-06
- products:
- - LFP41
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - PU241
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP41
+ type: fission
+ # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)
+ - transmutation:
+ branch: 1.6348e-04
+ products:
+ - H3
+ - DUMP1
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - AM243
+ type: nGamma
+ - decay:
+ branch: 1.0
+ products:
+ - U238
+ type: ad
+ - decay:
+ branch: 5.482456e-06
+ products:
+ - LFP41
+ type: sf
TH232:
-- transmutation:
- branch: 1.0
- products:
- - PA233
- type: nGamma
-- transmutation:
- branch: 1.0
- products:
- - LFP35
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - PA231
- type: n2n
-- decay:
- branch: 1.0
- products:
- - DUMP2
- type: ad
-- decay:
- branch: 1.410000e-11
- products:
- - LFP35
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - PA233
+ type: nGamma
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP35
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - PA231
+ type: n2n
+ - decay:
+ branch: 1.0
+ products:
+ - DUMP2
+ type: ad
+ - decay:
+ branch: 1.410000e-11
+ products:
+ - LFP35
+ type: sf
U232:
-- transmutation:
- branch: 1.0
- products:
- - U233
- type: nGamma
-- transmutation:
- branch: 1.0
- products:
- - LFP35
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - PA231
- type: n2n
-- decay:
- branch: 1.0
- products:
- - DUMP2
- type: ad
-- decay:
- branch: 8.612316e-13
- products:
- - LFP35
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - U233
+ type: nGamma
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP35
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - PA231
+ type: n2n
+ - decay:
+ branch: 1.0
+ products:
+ - DUMP2
+ type: ad
+ - decay:
+ branch: 8.612316e-13
+ products:
+ - LFP35
+ type: sf
U233:
-- transmutation:
- branch: 1.0
- products:
- - U234
- type: nGamma
-- transmutation:
- branch: 1.0
- products:
- - LFP35
- type: fission
-# add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)
-- transmutation:
- branch: 1.1400e-04
- products:
- - H3
- - DUMP1
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - U232
- type: n2n
-- decay:
- branch: 1.0
- products:
- - DUMP2
- type: ad
-- decay:
- branch: 1.326638e-12
- products:
- - LFP35
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - U234
+ type: nGamma
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP35
+ type: fission
+ # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)
+ - transmutation:
+ branch: 1.1400e-04
+ products:
+ - H3
+ - DUMP1
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - U232
+ type: n2n
+ - decay:
+ branch: 1.0
+ products:
+ - DUMP2
+ type: ad
+ - decay:
+ branch: 1.326638e-12
+ products:
+ - LFP35
+ type: sf
U234:
-- transmutation:
- branch: 1.0
- products:
- - U233
- - DUMP2
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - LFP35
- type: fission
-# add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)
-- transmutation:
- branch: 1.5925e-04
- products:
- - H3
- - DUMP1
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - U235
- type: nGamma
-- decay:
- branch: 1.0
- products:
- - DUMP2
- type: ad
-- decay:
- branch: 1.169048e-11
- products:
- - LFP35
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - U233
+ - DUMP2
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP35
+ type: fission
+ # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)
+ - transmutation:
+ branch: 1.5925e-04
+ products:
+ - H3
+ - DUMP1
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - U235
+ type: nGamma
+ - decay:
+ branch: 1.0
+ products:
+ - DUMP2
+ type: ad
+ - decay:
+ branch: 1.169048e-11
+ products:
+ - LFP35
+ type: sf
U235:
-- transmutation:
- branch: 1.0
- products:
- - U234
- - DUMP2
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - LFP35
- type: fission
-# add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)
-- transmutation:
- branch: 1.0800e-04
- products:
- - H3
- - DUMP1
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - U236
- type: nGamma
-- decay:
- branch: 1.0
- products:
- - DUMP2
- type: ad
-- decay:
- branch: 2.011429e-09
- products:
- - LFP35
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - U234
+ - DUMP2
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP35
+ type: fission
+ # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)
+ - transmutation:
+ branch: 1.0800e-04
+ products:
+ - H3
+ - DUMP1
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - U236
+ type: nGamma
+ - decay:
+ branch: 1.0
+ products:
+ - DUMP2
+ type: ad
+ - decay:
+ branch: 2.011429e-09
+ products:
+ - LFP35
+ type: sf
U236:
-- transmutation:
- branch: 1.0
- products:
- - U235
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - NP237
- type: nGamma
-- transmutation:
- branch: 1.0
- products:
- - LFP35
- type: fission
-- transmutation:
- branch: 1.3094e-04
- products:
- - H3
- - DUMP1
- type: fission
-- decay:
- branch: 1.0
- products:
- - DUMP2
- type: ad
-- decay:
- branch: 1.201026e-09
- products:
- - LFP35
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - U235
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - NP237
+ type: nGamma
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP35
+ type: fission
+ - transmutation:
+ branch: 1.3094e-04
+ products:
+ - H3
+ - DUMP1
+ type: fission
+ - decay:
+ branch: 1.0
+ products:
+ - DUMP2
+ type: ad
+ - decay:
+ branch: 1.201026e-09
+ products:
+ - LFP35
+ type: sf
U238:
-- transmutation:
- branch: 1.0
- products:
- - NP237
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - LFP38
- type: fission
-# add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)
-- transmutation:
- branch: 1.0262e-04
- products:
- - H3
- - DUMP1
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - NP239
- - PU239
- type: nGamma
-- decay:
- branch: 5.448780e-07
- products:
- - LFP38
- type: sf
+ - transmutation:
+ branch: 1.0
+ products:
+ - NP237
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP38
+ type: fission
+ # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)
+ - transmutation:
+ branch: 1.0262e-04
+ products:
+ - H3
+ - DUMP1
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - NP239
+ - PU239
+ type: nGamma
+ - decay:
+ branch: 5.448780e-07
+ products:
+ - LFP38
+ type: sf
NP239:
-- transmutation:
- branch: 1.0
- products:
- - NP238
- type: n2n
-- transmutation:
- branch: 1.0
- products:
- - LFP38
- type: fission
-- transmutation:
- branch: 1.0
- products:
- - PU240
- type: nGamma
-- decay:
- branch: 1.0
- products:
- - PU239
- type: bmd
+ - transmutation:
+ branch: 1.0
+ products:
+ - NP238
+ type: n2n
+ - transmutation:
+ branch: 1.0
+ products:
+ - LFP38
+ type: fission
+ - transmutation:
+ branch: 1.0
+ products:
+ - PU240
+ type: nGamma
+ - decay:
+ branch: 1.0
+ products:
+ - PU239
+ type: bmd
Y89:
-- transmutation:
- branch: 1.0
- products:
- - SR89
- type: np
+ - transmutation:
+ branch: 1.0
+ products:
+ - SR89
+ type: np
SR89:
-- transmutation:
- branch: 1.0
- products:
- - SR90
- type: nGamma
-- decay:
- branch: 1.0
- products:
- - Y89
- type: bmd
+ - transmutation:
+ branch: 1.0
+ products:
+ - SR90
+ type: nGamma
+ - decay:
+ branch: 1.0
+ products:
+ - Y89
+ type: bmd
SR90:
-- decay:
- branch: 1.0
- products:
- - DUMP2
- type: bmd
+ - decay:
+ branch: 1.0
+ products:
+ - DUMP2
+ type: bmd
S32:
-- transmutation:
- branch: 1.0
- products:
- - P32
- type: np
-- transmutation:
- branch: 1.0
- products:
- - S33
- type: nGamma
+ - transmutation:
+ branch: 1.0
+ products:
+ - P32
+ type: np
+ - transmutation:
+ branch: 1.0
+ products:
+ - S33
+ type: nGamma
P32:
-- decay:
- branch: 1.0
- products:
- - S32
- type: bmd
+ - decay:
+ branch: 1.0
+ products:
+ - S32
+ type: bmd
S33:
-- transmutation:
- branch: 1.0
- products:
- - S34
- type: nGamma
+ - transmutation:
+ branch: 1.0
+ products:
+ - S34
+ type: nGamma
S34:
-- transmutation:
- branch: 1.0
- products:
- - S35
- type: nGamma
+ - transmutation:
+ branch: 1.0
+ products:
+ - S35
+ type: nGamma
S35:
-- decay:
- branch: 1.0
- products:
- - DUMP2
- type: bmd
+ - decay:
+ branch: 1.0
+ products:
+ - DUMP2
+ type: bmd
S36:
-- transmutation:
- branch: 1.0
- products:
- - S37
- type: nGamma
+ - transmutation:
+ branch: 1.0
+ products:
+ - S37
+ type: nGamma
S37:
-- decay:
- branch: 1.0
- products:
- - DUMP2
- type: bmd
+ - decay:
+ branch: 1.0
+ products:
+ - DUMP2
+ type: bmd
TI46:
-- transmutation:
- branch: 1.0
- products:
- - TI47
- type: nGamma
+ - transmutation:
+ branch: 1.0
+ products:
+ - TI47
+ type: nGamma
TI47:
-- transmutation:
- branch: 1.0
- products:
- - SC47
- type: np
-- transmutation:
- branch: 1.0
- products:
- - TI48
- type: nGamma
+ - transmutation:
+ branch: 1.0
+ products:
+ - SC47
+ type: np
+ - transmutation:
+ branch: 1.0
+ products:
+ - TI48
+ type: nGamma
SC47:
-- decay:
- branch: 1.0
- products:
- - TI47
- type: bmd
+ - decay:
+ branch: 1.0
+ products:
+ - TI47
+ type: bmd
TI48:
-- transmutation:
- branch: 1.0
- products:
- - TI49
- type: nGamma
+ - transmutation:
+ branch: 1.0
+ products:
+ - TI49
+ type: nGamma
TI49:
-- transmutation:
- branch: 1.0
- products:
- - TI50
- type: nGamma
+ - transmutation:
+ branch: 1.0
+ products:
+ - TI50
+ type: nGamma
TI50:
-- transmutation:
- branch: 1.0
- products:
- - TI51
- type: nGamma
+ - transmutation:
+ branch: 1.0
+ products:
+ - TI51
+ type: nGamma
TI51:
-- decay:
- branch: 1.0
- products:
- - DUMP2
- type: bmd
+ - decay:
+ branch: 1.0
+ products:
+ - DUMP2
+ type: bmd
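The entire burn-chain change above is re-indentation: YAML block sequences parse identically whether list items sit flush with their key or are indented beneath it, so the loaded data is unchanged. A quick equivalence check with PyYAML (the snippet strings are abbreviated examples, not full entries):

```python
import yaml

flush = """
AM241:
- decay:
    branch: 1.0
    products:
    - NP237
    type: ad
"""

indented = """
AM241:
  - decay:
      branch: 1.0
      products:
        - NP237
      type: ad
"""

# Both indentation styles load to the same structure.
assert yaml.safe_load(flush) == yaml.safe_load(indented)
```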
diff --git a/armi/resources/config/submitter.cfg b/armi/resources/config/submitter.cfg
deleted file mode 100644
index 24248c8ea..000000000
--- a/armi/resources/config/submitter.cfg
+++ /dev/null
@@ -1,14 +0,0 @@
-[settings]
-recent_files
-write_style = short
-
-[warnings]
-allow_warnings = True
-condensed_warnings = True
-job_ended = False
-
-[window]
-maximized
-sash_position = -160
-screen_position = (-1, -1)
-size = (1125, 720)
diff --git a/armi/resources/images/armi-icon.ico b/armi/resources/images/armi-icon.ico
deleted file mode 100644
index 2dc96a02d..000000000
Binary files a/armi/resources/images/armi-icon.ico and /dev/null differ
diff --git a/armi/resources/images/armiicon.ico b/armi/resources/images/armiicon.ico
deleted file mode 100644
index f45a44c57..000000000
Binary files a/armi/resources/images/armiicon.ico and /dev/null differ
diff --git a/armi/resources/mcc-nuclides.yaml b/armi/resources/mcc-nuclides.yaml
index 8ba519a08..a4d68fdb7 100644
--- a/armi/resources/mcc-nuclides.yaml
+++ b/armi/resources/mcc-nuclides.yaml
@@ -2,7 +2,7 @@
# codes. The MC2-2 code base uses ENDF/B-V.2 and the MC2-3 code base uses
# ENDF/B-VII.0. This file can be amended in the future for MC2-3 as the
# code base changes, but the nuclides that MC2-3 models are consistent with
-# the data that is supplied by ENDF/B-VII.0. See: Appendix B of ANL/NE-11/41 Rev.3
+# the data that is supplied by ENDF/B-VII.0. See: Appendix B of ANL/NE-11/41 Rev.3
# Public Link: https://publications.anl.gov/anlpubs/2018/10/147840.pdf.
AC225:
ENDF/B-V.2: null
diff --git a/armi/runLog.py b/armi/runLog.py
index dcf0e1ff6..484672dfd 100644
--- a/armi/runLog.py
+++ b/armi/runLog.py
@@ -44,17 +44,16 @@
runLog.setVerbosity('debug')
"""
-from glob import glob
import collections
import logging
import operator
import os
import sys
import time
+from glob import glob
from armi import context
-
# global constants
_ADD_LOG_METHOD_STR = """def {0}(self, message, *args, **kws):
if self.isEnabledFor({1}):
@@ -344,14 +343,6 @@ def concatenateLogs(logDir=None):
stdoutFiles = sorted(glob(os.path.join(logDir, "*.stdout")))
if not len(stdoutFiles):
info("No log files found to concatenate.")
-
- # If the log dir is empty, we can delete it.
- try:
- os.rmdir(logDir)
- except: # noqa: bare-except
- # low priority concern: it's an empty log dir.
- pass
-
return
info("Concatenating {0} log files".format(len(stdoutFiles)))
@@ -362,8 +353,10 @@ def concatenateLogs(logDir=None):
stdoutFile = os.path.normpath(stdoutPath).split(os.sep)[-1]
prefix = STDOUT_LOGGER_NAME + "."
if stdoutFile[0 : len(prefix)] == prefix:
- caseTitle = stdoutFile.split(".")[-3]
- break
+ candidate = stdoutFile.split(".")[-3]
+ if len(candidate) > 0:
+ caseTitle = candidate
+ break
combinedLogName = os.path.join(logDir, "{}-mpi.log".format(caseTitle))
with open(combinedLogName, "w") as workerLog:
diff --git a/armi/settings/__init__.py b/armi/settings/__init__.py
index 1a4566280..258864bde 100644
--- a/armi/settings/__init__.py
+++ b/armi/settings/__init__.py
@@ -29,8 +29,8 @@
from armi import runLog
from armi.settings.caseSettings import Settings
-from armi.settings.setting import Default # noqa: unused-import
-from armi.settings.setting import Option # noqa: unused-import
+from armi.settings.setting import Default # noqa: F401
+from armi.settings.setting import Option # noqa: F401
from armi.settings.setting import Setting
from armi.utils.customExceptions import InvalidSettingsFileError
diff --git a/armi/settings/caseSettings.py b/armi/settings/caseSettings.py
index 8b76f72e0..165359fd5 100644
--- a/armi/settings/caseSettings.py
+++ b/armi/settings/caseSettings.py
@@ -467,7 +467,9 @@ def modified(self, caseTitle=None, newSettings=None):
elif key in settings.__settings:
settings.__settings[key].setValue(val)
else:
- settings.__settings[key] = Setting(key, val)
+ settings.__settings[key] = Setting(
+ key, val, description="Description from cs.modified()"
+ )
return settings
diff --git a/armi/settings/fwSettings/databaseSettings.py b/armi/settings/fwSettings/databaseSettings.py
index 7424c7fec..3c527b1ae 100644
--- a/armi/settings/fwSettings/databaseSettings.py
+++ b/armi/settings/fwSettings/databaseSettings.py
@@ -18,7 +18,6 @@
from armi.settings import setting
-
CONF_DB = "db"
CONF_DEBUG_DB = "debugDB"
CONF_RELOAD_DB_NAME = "reloadDBName"
@@ -38,7 +37,12 @@ def defineSettings():
label="Activate Database",
description="Write the state information to a database at every timestep",
),
- setting.Setting(CONF_DEBUG_DB, default=False, label="Debug Database"),
+ setting.Setting(
+ CONF_DEBUG_DB,
+ default=False,
+ label="Debug Database",
+ description="Write state to DB with a unique timestamp or label.",
+ ),
setting.Setting(
CONF_RELOAD_DB_NAME,
default="",
diff --git a/armi/settings/fwSettings/globalSettings.py b/armi/settings/fwSettings/globalSettings.py
index 4f88367ba..3b1143880 100644
--- a/armi/settings/fwSettings/globalSettings.py
+++ b/armi/settings/fwSettings/globalSettings.py
@@ -26,8 +26,8 @@
from armi import context
from armi.settings import setting
-from armi.utils.mathematics import isMonotonic
from armi.settings.fwSettings import tightCouplingSettings
+from armi.utils.mathematics import isMonotonic
# Framework settings
@@ -54,7 +54,6 @@
CONF_CYCLE_LENGTHS = "cycleLengths"
CONF_CYCLES = "cycles"
CONF_CYCLES_SKIP_TIGHT_COUPLING_INTERACTION = "cyclesSkipTightCouplingInteraction"
-CONF_DEBUG = "debug"
CONF_DEBUG_MEM = "debugMem"
CONF_DEBUG_MEM_SIZE = "debugMemSize"
CONF_DECAY_CONSTANTS = "decayConstants"
@@ -65,7 +64,6 @@
CONF_DETAIL_ASSEM_LOCATIONS_BOL = "detailAssemLocationsBOL"
CONF_DETAIL_ASSEM_NUMS = "detailAssemNums"
CONF_DETAILED_AXIAL_EXPANSION = "detailedAxialExpansion"
-CONF_DO_ORIFICED_TH = "doOrificedTH" # zones
CONF_DUMP_SNAPSHOT = "dumpSnapshot"
CONF_EQ_DIRECT = "eqDirect" # fuelCycle/equilibrium coupling
CONF_EXPLICIT_REPEAT_SHUFFLES = "explicitRepeatShuffles"
@@ -222,8 +220,9 @@ def defineSettings() -> List[setting.Setting]:
default=True,
label="Input Height Considered Hot",
description=(
- "This is a flag to determine if block heights, as provided in blueprints, are at hot dimensions. "
- "If false, block heights are at cold/as-built dimensions and will be thermally expanded as appropriate."
+ "This is a flag to determine if block heights, as provided in blueprints, are at "
+ "hot dimensions. If false, block heights are at cold/as-built dimensions and will "
+ "be thermally expanded as appropriate."
),
),
setting.Setting(
@@ -237,8 +236,7 @@ def defineSettings() -> List[setting.Setting]:
CONF_TRACE,
default=False,
label="Use the Python Tracer",
- description="Activate Python trace module to print out each line as it's "
- "executed",
+ description="Activate Python trace module to print out each line as it's executed",
isEnvironment=True,
),
setting.Setting(
@@ -306,8 +304,7 @@ def defineSettings() -> List[setting.Setting]:
default=1.0,
label="Plant Availability Factor",
description="Availability factor of the plant. This is the fraction of the "
- "time that the plant is operating. If variable, use `availabilityFactors` "
- "setting.",
+ "time that the plant is operating. If variable, use `availabilityFactors` setting.",
oldNames=[
("capacityFactor", None),
],
@@ -425,7 +422,8 @@ def defineSettings() -> List[setting.Setting]:
CONF_BURNUP_PEAKING_FACTOR,
default=0.0,
label="Burn-up Peaking Factor",
- description="None",
+ description="The peak/avg factor for burnup and DPA. If it is not set the current flux "
+ "peaking is used (this is typically conservatively high).",
schema=vol.All(vol.Coerce(float), vol.Range(min=0)),
),
setting.Setting(
@@ -442,20 +440,24 @@ def defineSettings() -> List[setting.Setting]:
description="A comment describing this case",
),
setting.Setting(
- CONF_COPY_FILES_FROM, default=[], label="None", description="None"
- ),
- setting.Setting(
- CONF_COPY_FILES_TO, default=[], label="None", description="None"
+ CONF_COPY_FILES_FROM,
+ default=[],
+ label="Copy These Files",
+ description="A list of files that need to be copied at the start of a run.",
),
setting.Setting(
- CONF_DEBUG, default=False, label="Python Debug Mode", description="None"
+ CONF_COPY_FILES_TO,
+ default=[],
+ label="Copy to These Directories",
+ description="A list of directories to copy provided files into at the start of a run."
+ "This list can be of length zero (copy to working dir), 1 (copy all files to the same "
+ f"place), or it must be the same length as {CONF_COPY_FILES_FROM}",
),
setting.Setting(
CONF_DEBUG_MEM,
default=False,
label="Debug Memory",
- description="Turn on memory debugging options to help find problems with "
- "the code",
+ description="Turn on memory debugging options to help find problems with the code",
),
setting.Setting(
CONF_DEBUG_MEM_SIZE,
@@ -490,8 +492,7 @@ def defineSettings() -> List[setting.Setting]:
label="Detailed Assems - ID",
description="Assembly numbers(IDs) for assemblies that will have "
"'detailed' treatment. This option will track assemblies that not in the "
- "core at BOL. Note: This option is interpreted differently by different "
- "modules.",
+ "core at BOL. Note: This option is interpreted differently by different modules.",
schema=vol.Schema([int]),
),
setting.Setting(
@@ -508,13 +509,6 @@ def defineSettings() -> List[setting.Setting]:
description="List of snapshots to dump reactor physics kernel input and "
"output files. Can be used to perform follow-on analysis.",
),
- setting.Setting(
- CONF_DO_ORIFICED_TH,
- default=False,
- label="Perform Core Orificing",
- description="Perform orificed thermal hydraulics (requires bounds file "
- "from a previous case)",
- ),
setting.Setting(
CONF_EQ_DIRECT,
default=False,
@@ -532,7 +526,8 @@ def defineSettings() -> List[setting.Setting]:
CONF_FRESH_FEED_TYPE,
default="feed fuel",
label="Fresh Feed Type",
- description="None",
+ description="The type of fresh fuel added to the core, used in certain pre-defined "
+ "fuel shuffling logic sequences.",
options=["feed fuel", "igniter fuel", "inner driver fuel"],
),
setting.Setting(
@@ -671,7 +666,10 @@ def defineSettings() -> List[setting.Setting]:
schema=vol.All(vol.Coerce(float), vol.Range(min=0)),
),
setting.Setting(
- CONF_REMOVE_PER_CYCLE, default=3, label="Move per Cycle", description="None"
+ CONF_REMOVE_PER_CYCLE,
+ default=3,
+ label="Remove per Cycle",
+ description="The number of fuel assemblies removed per cycle at equilibrium.",
),
setting.Setting(
CONF_RUN_TYPE,
diff --git a/armi/settings/setting.py b/armi/settings/setting.py
index 9146c4e09..1d83cccc7 100644
--- a/armi/settings/setting.py
+++ b/armi/settings/setting.py
@@ -65,7 +65,7 @@ def __init__(
self,
name,
default,
- description=None,
+ description,
label=None,
options=None,
schema=None,
@@ -83,7 +83,7 @@ def __init__(
the setting's name
default : object
The setting's default value
- description : str, optional
+ description : str
The description of the setting
label : str, optional
the shorter description used for the ARMI GUI
@@ -113,6 +113,9 @@ def __init__(
will result in errors, requiring to user to update their input by hand to
use more current settings.
"""
+ assert description, f"Setting {name} defined without description."
+ assert description != "None", f"Setting {name} defined without description."
+
self.name = name
self.description = description or name
self.label = label or name
@@ -121,13 +124,12 @@ def __init__(
self.subLabels = subLabels
self.isEnvironment = isEnvironment
self.oldNames: List[Tuple[str, Optional[datetime.date]]] = oldNames or []
-
self._default = default
+ self._value = copy.deepcopy(default) # break link from _default
# Retain the passed schema so that we don't accidentally stomp on it in
# addOptions(), et.al.
self._customSchema = schema
self._setSchema()
- self._value = copy.deepcopy(default) # break link from _default
@property
def underlyingType(self):
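With `description` promoted to a required argument (and asserted to be neither empty nor the literal string "None"), every `Setting` must now carry real documentation. A sketch of the new contract, constructing `Setting` directly as the diff does (the setting name here is hypothetical):

```python
from armi.settings import setting

# OK: a meaningful description is provided.
ok = setting.Setting("myFlag", default=False, description="Enables my feature.")

# Fails the new assert with "Setting myFlag defined without description."
try:
    setting.Setting("myFlag", default=False, description=None)
except AssertionError as err:
    print(err)
```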
diff --git a/armi/settings/settingsIO.py b/armi/settings/settingsIO.py
index 4447979e4..516b5f238 100644
--- a/armi/settings/settingsIO.py
+++ b/armi/settings/settingsIO.py
@@ -235,7 +235,7 @@ def _readYaml(self, stream):
self.inputVersion = setts[CONF_VERSIONS]["armi"]
else:
runLog.warning(
- "Versions setting section not found. Continuing with uncontrolled verisons."
+ "Versions setting section not found. Continuing with uncontrolled versions."
)
self.inputVersion = "uncontrolled"
diff --git a/armi/settings/settingsValidation.py b/armi/settings/settingsValidation.py
new file mode 100644
index 000000000..ed11f5b33
--- /dev/null
+++ b/armi/settings/settingsValidation.py
@@ -0,0 +1,803 @@
+# Copyright 2019 TerraPower, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+A system to check user settings for validity and provide users with meaningful
+suggestions to fix.
+
+This allows developers to define a rich set of rules and suggestions for user settings.
+These then pop up during initialization of a run, either on the command line or as
+dialogues in the GUI. They say things like: "Your ___ setting has the value ___, which
+is impossible. Would you like to switch to ___?"
+"""
+import itertools
+import os
+import re
+import shutil
+
+from armi import context
+from armi import getPluginManagerOrFail
+from armi import runLog
+from armi.physics import neutronics
+from armi.reactor import geometry
+from armi.reactor import systemLayoutInput
+from armi.settings.settingsIO import (
+ prompt,
+ RunLogPromptCancel,
+ RunLogPromptUnresolvable,
+)
+from armi.utils import directoryChangers
+from armi.utils import pathTools
+from armi.utils.mathematics import expandRepeatedFloats
+
+
+class Query:
+ """
+ An individual setting validator.
+
+ .. impl:: Rules to validate and customize a setting's behavior.
+ :id: I_ARMI_SETTINGS_RULES
+ :implements: R_ARMI_SETTINGS_RULES
+
+ This class is meant to represent a generic validation test against a setting.
+ The goal is that developers create new settings and want to make sure those
+ settings are used correctly. As an implementation, users pass a
+ ``condition`` function to this class that returns ``True`` or ``False`` based
+ on the setting name and value. This class then has a ``resolve`` method
+ that tests whether the condition is met. Optionally, this class also contains a
+ ``correction`` function that allows users to automatically correct a bad
+ setting, if the developers can find a clear path forward.
+ """
+
+ def __init__(self, condition, statement, question, correction):
+ """
+ Construct a query.
+
+ Parameters
+ ----------
+ condition : callable
+ A callable that returns True or False. If True, then the query activates
+ its question and potential correction.
+ statement : str
+ A statement of the problem indicated by a True condition
+ question : str
+ A question asking the user for confirmation of the proposed
+ fix.
+ correction : callable
+ A callable that, when called, fixes the situation. See
+ :py:meth:`Inspector.NO_ACTION` for no-ops.
+ """
+ self.condition = condition
+ self.statement = statement
+ self.question = question
+ self.correction = correction
+ # True if the query is `passed` and does not result in an immediate failure
+ self.corrected = False
+ self._passed = False
+ self.autoResolved = True
+
+ def __repr__(self):
+ # Add representation so that it's possible to identify which one
+ # is being referred to when there are errors.
+ return "".format(self.statement)
+
+ def __bool__(self):
+ try:
+ return bool(self.condition())
+ except TypeError:
+ runLog.error(
+ f"Invalid setting validation query. Update validator for: {self})"
+ )
+ raise
+
+ def isCorrective(self):
+ return self.correction is not Inspector.NO_ACTION
+
+ def resolve(self):
+ """Standard i/o prompt for resolution of an individual query."""
+ if context.MPI_RANK != 0:
+ return
+
+ if self.condition():
+ try:
+ if self.isCorrective():
+ try:
+ make_correction = prompt(
+ "INSPECTOR: " + self.statement,
+ self.question,
+ "YES_NO",
+ "NO_DEFAULT",
+ "CANCEL",
+ )
+ if make_correction:
+ self.correction()
+ self.corrected = True
+ self._passed = True
+ except RunLogPromptCancel as ki:
+ raise KeyboardInterrupt from ki
+ else:
+ try:
+ continue_submission = prompt(
+ "INSPECTOR: " + self.statement,
+ "Continue?",
+ "YES_NO",
+ "NO_DEFAULT",
+ "CANCEL",
+ )
+ if not continue_submission:
+ raise KeyboardInterrupt
+ except RunLogPromptCancel as ki:
+ raise KeyboardInterrupt from ki
+ except RunLogPromptUnresolvable:
+ self.autoResolved = False
+ self._passed = True
+
+
+class Inspector:
+ """
+ This manages queries which assert certain states of the data model, generally presenting
+ themselves to the user, offering information on the potential problem, a question
+ and the action to take on an affirmative and negative answer from the user.
+
+ In practice, this is very useful for making sure setting values are as intended
+ and do not interact badly with one another.
+
+ One Inspector will contain multiple Queries and be associated directly with an
+ :py:class:`~armi.operators.operator.Operator`.
+ """
+
+ @staticmethod
+ def NO_ACTION():
+ """Convenience callable used to generate Queries that can't be easily auto-resolved."""
+ return None
+
+ def __init__(self, cs):
+ """
+ Construct an inspector.
+
+ Parameters
+ ----------
+ cs : Settings
+ """
+ self.queries = []
+ self.cs = cs
+ self.geomType = None
+ self.coreSymmetry = None
+ self._inspectBlueprints()
+ self._setGeomType()
+ self._inspectSettings()
+
+ # Gather and attach validators from all plugins
+ # This runs on all registered plugins, not just active ones.
+ pluginQueries = getPluginManagerOrFail().hook.defineSettingsValidators(
+ inspector=self
+ )
+ for queries in pluginQueries:
+ self.queries.extend(queries)
+
+ def run(self, cs=None):
+ """
+ Run through each query and deal with it if possible.
+
+ Returns
+ -------
+ correctionsMade : bool
+ Whether or not anything was updated.
+
+ Raises
+ ------
+ RuntimeError
+ When a programming error causes queries to loop.
+ """
+ if context.MPI_RANK != 0:
+ return False
+
+ # the following attribute changes will alter what the queries investigate when resolved
+ correctionsMade = False
+ self.cs = cs or self.cs
+ runLog.debug("{} executing queries.".format(self.__class__.__name__))
+ if not any(self.queries):
+ runLog.debug(
+ "{} found no problems with the current state.".format(
+ self.__class__.__name__
+ )
+ )
+ else:
+ for query in self.queries:
+ query.resolve()
+ if query.corrected:
+ correctionsMade = True
+ issues = [
+ query
+ for query in self.queries
+ if query and (query.isCorrective() and not query._passed)
+ ]
+ if any(issues):
+ # something isn't resolved or was unresolved by changes
+ raise RuntimeError(
+ "The input inspection did not resolve all queries, "
+ "some issues are creating cyclic resolutions: {}".format(issues)
+ )
+ runLog.debug("{} has finished querying.".format(self.__class__.__name__))
+
+ if correctionsMade:
+            # find an unused file path for the original settings, to avoid overwriting it
+ strSkeleton = "{}_old".format(self.cs.path.split(".yaml")[0])
+ for num in itertools.count():
+ if num == 0:
+ renamePath = f"{strSkeleton}.yaml"
+ else:
+ renamePath = f"{strSkeleton}{num}.yaml"
+ if not self._csRelativePathExists(renamePath):
+ break
+ # preserve old file before saving settings file
+ runLog.important(
+ f"Preserving original settings file by renaming `{renamePath}`"
+ )
+ shutil.copy(self.cs.path, renamePath)
+ # save settings file
+ self.cs.writeToYamlFile(self.cs.path)
+
+ return correctionsMade
+
+ def addQuery(self, condition, statement, question, correction):
+ """Convenience method, query must be resolved, else run fails."""
+ if not callable(correction):
+ raise ValueError(
+ 'Query for "{}" malformed. Expecting callable.'.format(statement)
+ )
+ self.queries.append(Query(condition, statement, question, correction))
+
+ def addQueryBadLocationWillLikelyFail(self, settingName):
+ """Add a query indicating the current path for ``settingName`` does not exist and will likely fail."""
+ self.addQuery(
+ lambda: not os.path.exists(pathTools.armiAbsPath(self.cs[settingName])),
+ "Setting {} points to nonexistent location\n{}\nFailure extremely likely".format(
+ settingName, self.cs[settingName]
+ ),
+ "",
+ self.NO_ACTION,
+ )
+
+ def addQueryCurrentSettingMayNotSupportFeatures(self, settingName):
+ """Add a query that the current value for ``settingName`` may not support certain features."""
+ self.addQuery(
+ lambda: self.cs[settingName] != self.cs.getSetting(settingName).default,
+ "{} set as:\n{}\nUsing this location instead of the default location\n{}\n"
+ "may not support certain functions.".format(
+ settingName,
+ self.cs[settingName],
+ self.cs.getSetting(settingName).default,
+ ),
+ "Revert to default location?",
+ lambda: self._assignCS(
+ settingName, self.cs.getSetting(settingName).default
+ ),
+ )
+
+ def _assignCS(self, key, value):
+ """Lambda assignment workaround."""
+ # this type of assignment works, but be mindful of
+ # scoping when trying different methods
+ runLog.extra(f"Updating setting `{key}` to `{value}`")
+ self.cs[key] = value
+
+ def _raise(self):
+ raise KeyboardInterrupt("Input inspection has been interrupted.")
+
+ def _inspectBlueprints(self):
+ """Blueprints early error detection and old format conversions."""
+ from armi.physics.neutronics.settings import CONF_LOADING_FILE
+
+ # if there is a blueprints object, we don't need to check for a file
+ if self.cs.filelessBP:
+ return
+
+ self.addQuery(
+ lambda: not self.cs[CONF_LOADING_FILE],
+ "No blueprints file loaded. Run will probably fail.",
+ "",
+ self.NO_ACTION,
+ )
+
+ self.addQuery(
+ lambda: not self._csRelativePathExists(self.cs[CONF_LOADING_FILE]),
+ "Blueprints file {} not found. Run will fail.".format(
+ self.cs[CONF_LOADING_FILE]
+ ),
+ "",
+ self.NO_ACTION,
+ )
+
+ def _csRelativePathExists(self, filename):
+ csRelativePath = self._csRelativePath(filename)
+ return os.path.exists(csRelativePath) and os.path.isfile(csRelativePath)
+
+ def _csRelativePath(self, filename):
+ return os.path.join(self.cs.inputDirectory, filename)
+
+ def _setGeomType(self):
+ if self.cs["geomFile"]:
+ with directoryChangers.DirectoryChanger(
+ self.cs.inputDirectory, dumpOnException=False
+ ):
+ geom = systemLayoutInput.SystemLayoutInput()
+ geom.readGeomFromFile(self.cs["geomFile"])
+
+ self.geomType, self.coreSymmetry = geom.geomType, geom.symmetry
+
+ def _correctCyclesToZeroBurnup(self):
+ self._assignCS("nCycles", 1)
+ self._assignCS("burnSteps", 0)
+ self._assignCS("cycleLength", None)
+ self._assignCS("cycleLengths", None)
+ self._assignCS("availabilityFactor", None)
+ self._assignCS("availabilityFactors", None)
+ self._assignCS("cycles", [])
+
+ def _checkForBothSimpleAndDetailedCyclesInputs(self):
+ """
+ Because the only way to check if a setting has been "entered" is to check
+ against the default, if the user specifies all the simple cycle settings
+ _exactly_ as the defaults, this won't be caught. But, it would be very
+ coincidental for the user to _specify_ all the default values when
+ performing any real analysis.
+
+ Also, we must bypass the `Settings` getter and reach directly
+ into the underlying `__settings` dict to avoid triggering an error
+ at this stage in the run. Otherwise an error will inherently be raised
+ if the detailed cycles input is used because the simple cycles inputs
+ have defaults. We don't care that those defaults are there, we only
+ have a problem with those defaults being _used_, which will be caught
+ later on.
+ """
+        simpleCycleSettings = [
+            "cycleLength",
+            "cycleLengths",
+            "burnSteps",
+            "availabilityFactor",
+            "availabilityFactors",
+            "powerFractions",
+        ]
+        bothCyclesInputTypesPresent = (
+            any(
+                self.cs._Settings__settings[name].value
+                != self.cs._Settings__settings[name].default
+                for name in simpleCycleSettings
+            )
+            and self.cs["cycles"] != []
+        )
+
+ return bothCyclesInputTypesPresent
+
+ def _inspectSettings(self):
+ """Check settings for inconsistencies."""
+ from armi import operators
+ from armi.physics.neutronics.settings import (
+ CONF_BC_COEFFICIENT,
+ CONF_BOUNDARIES,
+ CONF_XS_KERNEL,
+ CONF_XS_SCATTERING_ORDER,
+ )
+
+ self.addQueryBadLocationWillLikelyFail("operatorLocation")
+
+ self.addQuery(
+ lambda: self.cs["outputFileExtension"] == "pdf" and self.cs["genReports"],
+ "Output files of '.pdf' format are not supported by the reporting HTML generator. '.pdf' "
+ "images will not be included.",
+ "Switch to '.png'?",
+ lambda: self._assignCS("outputFileExtension", "png"),
+ )
+
+ self.addQuery(
+ lambda: (
+ (
+ self.cs["beta"]
+ and isinstance(self.cs["beta"], list)
+ and not self.cs["decayConstants"]
+ )
+ or (self.cs["decayConstants"] and not self.cs["beta"])
+ ),
+ "Both beta components and decay constants should be provided if either are "
+ "being supplied.",
+ "",
+ self.NO_ACTION,
+        )
+
+ self.addQuery(
+ lambda: self.cs["skipCycles"] > 0 and not self.cs["reloadDBName"],
+ "You have chosen to do a restart case without specifying a database to load from. "
+ "Run will load from output files, if they exist but burnup, etc. will not be updated.",
+ "",
+ self.NO_ACTION,
+ )
+
+ self.addQuery(
+ lambda: self.cs["runType"] != operators.RunTypes.SNAPSHOTS
+ and self.cs["loadStyle"] == "fromDB"
+ and self.cs["startCycle"] == 0
+ and self.cs["startNode"] == 0,
+ "Starting from cycle 0, and time node 0 was chosen. Restart runs load from "
+ "the time node just before the restart. There is no time node to load from "
+ "before cycle 0 node 0. Either switch to the snapshot operator, start from "
+ "a different time step or load from inputs rather than database as "
+ "`loadStyle`.",
+ "",
+ self.NO_ACTION,
+ )
+
+ self.addQuery(
+ lambda: self.cs["runType"] == operators.RunTypes.SNAPSHOTS
+ and not (self.cs["dumpSnapshot"] or self.cs["defaultSnapshots"]),
+ "The Snapshots operator was specified, but no dump snapshots were chosen."
+ "Please specify snapshot steps with the `dumpSnapshot` setting.",
+ "",
+ self.NO_ACTION,
+ )
+
+ self.addQuery(
+ lambda: self.cs.caseTitle.lower()
+ == os.path.splitext(os.path.basename(self.cs["reloadDBName"].lower()))[0],
+ "Snapshot DB ({0}) and main DB ({1}) cannot have the same name."
+ "Change name of settings file and resubmit.".format(
+ self.cs["reloadDBName"], self.cs.caseTitle
+ ),
+ "",
+ self.NO_ACTION,
+ )
+
+ self.addQuery(
+ lambda: self.cs["reloadDBName"] != ""
+ and not os.path.exists(self.cs["reloadDBName"]),
+ "Reload database {} does not exist. \nPlease point to an existing DB, "
+ "or set to empty and load from input.".format(self.cs["reloadDBName"]),
+ "",
+ self.NO_ACTION,
+ )
+
+ def _willBeCopiedFrom(fName):
+ return any(
+ fName == os.path.split(copyFile)[1]
+ for copyFile in self.cs["copyFilesFrom"]
+ )
+
+ self.addQuery(
+ lambda: self.cs["explicitRepeatShuffles"]
+ and not self._csRelativePathExists(self.cs["explicitRepeatShuffles"])
+ and not _willBeCopiedFrom(self.cs["explicitRepeatShuffles"]),
+ "The specified repeat shuffle file `{0}` does not exist, and won't be copied. "
+ "Run will crash.".format(self.cs["explicitRepeatShuffles"]),
+ "",
+ self.NO_ACTION,
+ )
+
+ self.addQuery(
+ lambda: not self.cs["power"] and not self.cs["powerDensity"],
+ "No power or powerDensity set. You must always start by importing a base settings file.",
+ "",
+ self.NO_ACTION,
+ )
+
+ self.addQuery(
+ lambda: self.cs["power"] > 0 and self.cs["powerDensity"] > 0,
+ "The power and powerDensity are both set, please note the power will be used as the truth.",
+ "",
+ self.NO_ACTION,
+ )
+
+ # The gamma cross sections generated for MC2-3 by ANL were done with NJOY with
+ # P3 scattering. MC2-3 would have to be modified and the gamma cross sections
+ # re-generated with NJOY for MC2-3 to allow any other scattering order with
+ # gamma cross sections enabled.
+ self.addQuery(
+ lambda: (
+ "MC2v3" in self.cs[CONF_XS_KERNEL]
+ and neutronics.gammaXsAreRequested(self.cs)
+ and self.cs[CONF_XS_SCATTERING_ORDER] != 3
+ ),
+ "MC2-3 will crash if a scattering order is not set to 3 when generating gamma XS.",
+ f"Would you like to set the `{CONF_XS_SCATTERING_ORDER}` to 3?",
+ lambda: self._assignCS(CONF_XS_SCATTERING_ORDER, 3),
+ )
+
+ self.addQuery(
+ lambda: self.cs["outputCacheLocation"]
+ and not os.path.exists(self.cs["outputCacheLocation"]),
+ "`outputCacheLocation` path {} does not exist. Please specify a location that exists.".format(
+ self.cs["outputCacheLocation"]
+ ),
+ "",
+ self.NO_ACTION,
+ )
+
+ self.addQuery(
+ lambda: (
+ not self.cs["tightCoupling"]
+ and self.cs["tightCouplingMaxNumIters"] != 4
+ ),
+ "You've requested a non default number of tight coupling iterations but left tightCoupling: False."
+ "Do you want to set tightCoupling to True?",
+ "",
+ lambda: self._assignCS("tightCoupling", True),
+ )
+
+ self.addQuery(
+ lambda: (not self.cs["tightCoupling"] and self.cs["tightCouplingSettings"]),
+ "You've requested non default tight coupling settings but tightCoupling: False."
+ "Do you want to set tightCoupling to True?",
+ "",
+ lambda: self._assignCS("tightCoupling", True),
+ )
+
+ self.addQuery(
+ lambda: self.cs["startCycle"]
+ and self.cs["nCycles"] < self.cs["startCycle"],
+ "nCycles must be greater than or equal to startCycle in restart cases. nCycles"
+ " is the _total_ number of cycles in the completed run (i.e. restarted +"
+ " continued cycles). Please update the case settings.",
+ "",
+ self.NO_ACTION,
+ )
+
+ self.addQuery(
+ lambda: self.cs["nCycles"] in [0, None],
+ "Cannot run 0 cycles. Set burnSteps to 0 to activate a single time-independent case.",
+ "Set 1 cycle and 0 burnSteps for single time-independent case?",
+ self._correctCyclesToZeroBurnup,
+ )
+
+ self.addQuery(
+ self._checkForBothSimpleAndDetailedCyclesInputs,
+ "If specifying detailed cycle history with `cycles`, you may not"
+ " also use any of the simple cycle history inputs `cycleLength(s)`,"
+ " `burnSteps`, `availabilityFactor(s)`, or `powerFractions`."
+ " Using the detailed cycle history.",
+ "",
+ self.NO_ACTION,
+ )
+
+ def _factorsAreValid(factors, maxVal=1.0):
+ try:
+ expandedList = expandRepeatedFloats(factors)
+ except (ValueError, IndexError):
+ return False
+ return (
+ all(0.0 <= val <= maxVal for val in expandedList)
+ and len(expandedList) == self.cs["nCycles"]
+ )
+
+ if self.cs["cycles"] == []:
+ self.addQuery(
+ lambda: (
+ self.cs["availabilityFactors"]
+ and not _factorsAreValid(self.cs["availabilityFactors"])
+ ),
+ "`availabilityFactors` was not set to a list compatible with the number of cycles. "
+ "Please update input or use constant duration.",
+ "Use constant availability factor specified in `availabilityFactor` setting?",
+ lambda: self._assignCS("availabilityFactors", []),
+ )
+
+ self.addQuery(
+ lambda: (
+ self.cs["powerFractions"]
+ and not _factorsAreValid(self.cs["powerFractions"])
+ ),
+ "`powerFractions` was not set to a compatible list. "
+ "Please update input or use full power at all cycles.",
+ "Use full power for all cycles?",
+ lambda: self._assignCS("powerFractions", []),
+ )
+
+ self.addQuery(
+ lambda: (
+ self.cs["cycleLengths"]
+ and not _factorsAreValid(self.cs["cycleLengths"], maxVal=1e10)
+ ),
+ "`cycleLengths` was not set to a list compatible with the number of cycles."
+ " Please update input or use constant duration.",
+ "Use constant cycle length specified in `cycleLength` setting?",
+ lambda: self._assignCS("cycleLengths", []),
+ )
+
+ self.addQuery(
+ lambda: (
+ self.cs["runType"] == operators.RunTypes.STANDARD
+ and self.cs["burnSteps"] == 0
+ and (
+                    (
+                        self.cs["cycleLengths"] is not None
+                        and len(self.cs["cycleLengths"]) > 1
+                    )
+ or self.cs["nCycles"] > 1
+ )
+ ),
+ "Cannot run multi-cycle standard cases with 0 burnSteps per cycle. Please update settings.",
+ "",
+ self.NO_ACTION,
+ )
+
+ def decayCyclesHaveInputThatWillBeIgnored():
+ """Check if there is any decay-related input that will be ignored."""
+ try:
+ powerFracs = expandRepeatedFloats(self.cs["powerFractions"])
+ availabilities = expandRepeatedFloats(
+ self.cs["availabilityFactors"]
+ ) or ([self.cs["availabilityFactor"]] * self.cs["nCycles"])
+ except Exception:
+ return True
+
+ # This will be a full decay step and any power fraction will be ignored. May be ok.
+ return any(
+ pf > 0.0 and af == 0.0 for pf, af in zip(powerFracs, availabilities)
+ )
+
+ self.addQuery(
+ lambda: (
+ self.cs["cycleLengths"]
+ and self.cs["powerFractions"]
+ and decayCyclesHaveInputThatWillBeIgnored()
+ and not self.cs["cycles"]
+ ),
+ "At least one cycle has a non-zero power fraction but an availability of zero. Please "
+ "update the input.",
+ "",
+ self.NO_ACTION,
+ )
+
+ self.addQuery(
+ lambda: self.cs["operatorLocation"]
+ and self.cs["runType"] != operators.RunTypes.STANDARD,
+ "The `runType` setting is set to `{0}` but there is a `custom operator location` defined".format(
+ self.cs["runType"]
+ ),
+ "",
+ self.NO_ACTION,
+ )
+
+ self.addQuery(
+ lambda: self.cs["skipCycles"] > 0
+ and not os.path.exists(self.cs.caseTitle + ".restart.dat"),
+ "This is a restart case, but the required restart file {0}.restart.dat is not found".format(
+ self.cs.caseTitle
+ ),
+ "",
+ self.NO_ACTION,
+ )
+
+ self.addQuery(
+ lambda: self.cs["deferredInterfacesCycle"] > self.cs["nCycles"],
+ "The deferred interface activation cycle exceeds set cycle occurrence. "
+ "Interfaces will not be activated in this run!",
+ "",
+ self.NO_ACTION,
+ )
+
+ self.addQuery(
+ lambda: (
+ self.cs[CONF_BOUNDARIES] != neutronics.GENERAL_BC
+ and self.cs[CONF_BC_COEFFICIENT]
+ ),
+ f"General neutronic boundary condition was not selected, but `{CONF_BC_COEFFICIENT}` was defined. "
+ f"Please enable `Generalized` neutronic boundary condition or disable `{CONF_BC_COEFFICIENT}`.",
+ "",
+ self.NO_ACTION,
+ )
+
+ self.addQuery(
+ lambda: self.cs["geomFile"]
+ and str(self.geomType) not in geometry.VALID_GEOMETRY_TYPE,
+ "{} is not a valid geometry Please update geom type on the geom file. "
+ "Valid (case insensitive) geom types are: {}".format(
+ self.geomType, geometry.VALID_GEOMETRY_TYPE
+ ),
+ "",
+ self.NO_ACTION,
+ )
+
+ self.addQuery(
+ lambda: self.cs["geomFile"]
+ and not geometry.checkValidGeomSymmetryCombo(
+ self.geomType, self.coreSymmetry
+ ),
+ "{}, {} is not a valid geometry and symmetry combination. Please update "
+ "either geometry or symmetry on the geom file.".format(
+ str(self.geomType), str(self.coreSymmetry)
+ ),
+ "",
+ self.NO_ACTION,
+ )
+
+
+def createQueryRevertBadPathToDefault(inspector, settingName, initialLambda=None):
+ """
+ Return a query to revert a bad path to its default.
+
+ Parameters
+ ----------
+ inspector: Inspector
+        the inspector whose settings are being queried
+ settingName: str
+ name of the setting to inspect
+    initialLambda: None or callable function
+        If ``None``, the callable argument passed to :py:meth:`addQuery` simply checks
+        whether the setting's path exists. If a more complicated callable is needed,
+        it can be passed in via ``initialLambda``.
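+
+    Examples
+    --------
+    A hedged sketch; ``"shuffleLogic"`` is only an illustrative setting name::
+
+        query = createQueryRevertBadPathToDefault(inspector, "shuffleLogic")
+        inspector.queries.append(query)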
+ """
+ if initialLambda is None:
+ initialLambda = lambda: (
+ not os.path.exists(pathTools.armiAbsPath(inspector.cs[settingName]))
+ and inspector.cs.getSetting(settingName).offDefault
+ ) # solution is to revert to default
+
+ query = Query(
+ initialLambda,
+ "Setting {} points to a nonexistent location:\n{}".format(
+ settingName, inspector.cs[settingName]
+ ),
+ "Revert to default location?",
+ inspector.cs.getSetting(settingName).revertToDefault,
+ )
+ return query
+
+
+def validateVersion(versionThis: str, versionRequired: str) -> bool:
+ """Helper function to allow users to verify that their version matches the settings file.
+
+ Parameters
+ ----------
+ versionThis: str
+ The version of this ARMI, App, or Plugin.
+ This MUST be in the form: 1.2.3
+ versionRequired: str
+ The version to compare against, say in a Settings file.
+ This must be in one of the forms: 1.2.3, 1.2, or 1
+
+ Returns
+ -------
+ bool
+ Does this version match the version in the Settings file/object?
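+
+    Examples
+    --------
+    Illustrative checks that follow directly from the matching rules above:
+
+    >>> validateVersion("1.2.3", "uncontrolled")
+    True
+    >>> validateVersion("1.2.3", "1.2.3")
+    True
+    >>> validateVersion("1.2.3", "1.2")
+    True
+    >>> validateVersion("1.2.3", "2")
+    False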
+ """
+ fullV = "\d+\.\d+\.\d+"
+ medV = "\d+\.\d+"
+ minV = "\d+"
+
+ if versionRequired == "uncontrolled":
+ # This default flag means we don't want to check the version.
+ return True
+ elif re.search(fullV, versionThis) is None:
+ raise ValueError(
+ "The input version ({0}) does not match the required format: {1}".format(
+ versionThis, fullV
+ )
+ )
+ elif re.search(fullV, versionRequired) is not None:
+ return versionThis == versionRequired
+ elif re.search(medV, versionRequired) is not None:
+ return ".".join(versionThis.split(".")[:2]) == versionRequired
+ elif re.search(minV, versionRequired) is not None:
+ return versionThis.split(".")[0] == versionRequired
+ else:
+ raise ValueError(
+ "The required version is not a valid format: {}".format(versionRequired)
+ )
diff --git a/armi/operators/tests/test_inspectors.py b/armi/settings/tests/test_inspectors.py
similarity index 98%
rename from armi/operators/tests/test_inspectors.py
rename to armi/settings/tests/test_inspectors.py
index 3645bf0a1..fe6361759 100644
--- a/armi/operators/tests/test_inspectors.py
+++ b/armi/settings/tests/test_inspectors.py
@@ -19,8 +19,8 @@
from armi import context
from armi import operators
from armi import settings
-from armi.operators import settingsValidation
-from armi.operators.settingsValidation import createQueryRevertBadPathToDefault
+from armi.settings import settingsValidation
+from armi.settings.settingsValidation import createQueryRevertBadPathToDefault
from armi.utils import directoryChangers
diff --git a/armi/settings/tests/test_settings.py b/armi/settings/tests/test_settings.py
index 79c29f812..4deeb9d87 100644
--- a/armi/settings/tests/test_settings.py
+++ b/armi/settings/tests/test_settings.py
@@ -26,7 +26,6 @@
from armi import getPluginManagerOrFail
from armi import plugins
from armi import settings
-from armi.operators.settingsValidation import Inspector, validateVersion
from armi.physics.fuelCycle import FuelHandlerPlugin
from armi.physics.fuelCycle.settings import CONF_CIRCULAR_RING_ORDER
from armi.physics.fuelCycle.settings import CONF_SHUFFLE_LOGIC
@@ -34,6 +33,7 @@
from armi.reactor.flags import Flags
from armi.settings import caseSettings
from armi.settings import setting
+from armi.settings.settingsValidation import Inspector, validateVersion
from armi.tests import TEST_ROOT, ARMI_RUN_PATH
from armi.utils import directoryChangers
from armi.utils.customExceptions import NonexistentSetting
@@ -151,7 +151,7 @@ def test_addingOptions(self):
# modify the default/text settings YAML file to include neutronicsKernel
fin = os.path.join(TEST_ROOT, "armiRun.yaml")
txt = open(fin, "r").read()
- txt = txt.replace("\n nodeGroup:", "\n neutronicsKernel: MCNP\n nodeGroup:")
+ txt = txt.replace("\n nCycles:", "\n neutronicsKernel: MCNP\n nCycles:")
fout = "test_addingOptions.yaml"
open(fout, "w").write(txt)
@@ -312,7 +312,7 @@ def test_default(self):
:id: T_ARMI_SETTINGS_DEFAULTS
:tests: R_ARMI_SETTINGS_DEFAULTS
"""
- a = setting.Setting("testsetting", 0)
+ a = setting.Setting("testsetting", 0, description="whatever")
newDefault = setting.Default(5, "testsetting")
a.changeDefault(newDefault)
self.assertEqual(a.value, 5)
@@ -322,7 +322,7 @@ def test_getSettingsSetByUser(self):
settingsList = cs.getSettingsSetByUser(ARMI_RUN_PATH)
# This test is dependent on the current setup of armiRun.yaml, which includes
# some default settings values
- for sett in ["availabilityFactor", "economics"]:
+ for sett in ["availabilityFactor", "db"]:
self.assertIn(sett, settingsList)
self.assertNotIn("numProcessors", settingsList)
@@ -399,7 +399,7 @@ def test_copySetting(self):
removed by Setting.__getstate__, and that has been a problem in the past.
"""
# get a baseline: show how the Setting object looks to start
- s1 = setting.Setting("testCopy", 765)
+ s1 = setting.Setting("testCopy", 765, description="whatever")
self.assertEqual(s1.name, "testCopy")
self.assertEqual(s1._value, 765)
self.assertTrue(hasattr(s1, "schema"))
@@ -417,7 +417,7 @@ def test_copySettingNotDefault(self):
when the Setting value is set to a non-default value.
"""
# get a baseline: show how the Setting object looks to start
- s1 = setting.Setting("testCopy", 765)
+ s1 = setting.Setting("testCopy", 765, description="whatever")
s1.value = 999
self.assertEqual(s1.name, "testCopy")
self.assertEqual(s1._value, 999)
@@ -487,7 +487,9 @@ def test_flagListSetting(self):
flagsAsStringList = ["DUCT", "FUEL", "CLAD"]
flagsAsFlagList = [Flags.DUCT, Flags.FUEL, Flags.CLAD]
- fs = setting.FlagListSetting(name="testFlagSetting", default=[])
+ fs = setting.FlagListSetting(
+ name="testFlagSetting", default=[], description="whatever"
+ )
# Set the value as a list of strings first
fs.value = flagsAsStringList
self.assertEqual(fs.value, flagsAsFlagList)
@@ -500,7 +502,9 @@ def test_flagListSetting(self):
def test_invalidFlagListTypeError(self):
"""Test raising a TypeError when a list is not provided."""
- fs = setting.FlagListSetting(name="testFlagSetting", default=[])
+ fs = setting.FlagListSetting(
+ name="testFlagSetting", default=[], description="whatever"
+ )
with self.assertRaises(TypeError):
fs.value = "DUCT"
diff --git a/armi/settings/tests/test_settingsIO.py b/armi/settings/tests/test_settingsIO.py
index a2c1185cc..36dd72371 100644
--- a/armi/settings/tests/test_settingsIO.py
+++ b/armi/settings/tests/test_settingsIO.py
@@ -96,9 +96,19 @@ class SettingsRenameTests(unittest.TestCase):
"testSetting1",
default=None,
oldNames=[("oSetting1", None), ("osetting1", datetime.date.today())],
+ description="Just a unit test setting.",
+ ),
+ setting.Setting(
+ "testSetting2",
+ default=None,
+ oldNames=[("oSetting2", None)],
+ description="Just a unit test setting.",
+ ),
+ setting.Setting(
+ "testSetting3",
+ default=None,
+ description="Just a unit test setting.",
),
- setting.Setting("testSetting2", default=None, oldNames=[("oSetting2", None)]),
- setting.Setting("testSetting3", default=None),
]
def test_rename(self):
@@ -123,7 +133,10 @@ def test_collidingRenames(self):
for setting in self.testSettings
+ [
setting.Setting(
- "someOtherSetting", default=None, oldNames=[("oSetting1", None)]
+ "someOtherSetting",
+ default=None,
+ oldNames=[("oSetting1", None)],
+ description="Just a unit test setting.",
)
]
}
diff --git a/armi/tests/1DslabXSByCompTest.yaml b/armi/tests/1DslabXSByCompTest.yaml
index 0ea6d2b0b..ce9cf6e85 100644
--- a/armi/tests/1DslabXSByCompTest.yaml
+++ b/armi/tests/1DslabXSByCompTest.yaml
@@ -1,187 +1,216 @@
nuclide flags:
- NA23: {burn: false, xs: true}
- FE: {burn: false, xs: true}
- U235: {burn: false, xs: true}
- U238: {burn: false, xs: true}
- PU239: {burn: false, xs: true}
- PU240: {burn: false, xs: true}
- PU241: {burn: false, xs: true}
+ NA23: {burn: false, xs: true}
+ FE: {burn: false, xs: true}
+ U235: {burn: false, xs: true}
+ U238: {burn: false, xs: true}
+ PU239: {burn: false, xs: true}
+ PU240: {burn: false, xs: true}
+ PU241: {burn: false, xs: true}
custom isotopics:
- eUranium:
- input format: number densities
- U235: 0.025
- U238: 0.02
- PuUranium:
- input format: number densities
- PU239: 0.02
- PU240: 0.0075
- PU241: 0.0025
- U238: 0.015
- depletedUranium:
- input format: number densities
- U238: 0.045
- sodium:
- input format: number densities
- NA23: 0.02
- structuralSteel:
- input format: number densities
- FE: 0.07
- eUraniumHalf:
- input format: number densities
- U235: 0.0125
- U238: 0.01
+ eUranium:
+ input format: number densities
+ U235: 0.025
+ U238: 0.02
+ PuUranium:
+ input format: number densities
+ PU239: 0.02
+ PU240: 0.0075
+ PU241: 0.0025
+ U238: 0.015
+ depletedUranium:
+ input format: number densities
+ U238: 0.045
+ sodium:
+ input format: number densities
+ NA23: 0.02
+ structuralSteel:
+ input format: number densities
+ FE: 0.07
+ eUraniumHalf:
+ input format: number densities
+ U235: 0.0125
+ U238: 0.01
blocks:
- eu fuel block: &block_eufuelblock
- depleted_uranium: &component_eufuelblock_depleted_uranium
- shape: SolidRectangle
- material: Custom
- Tinput: 20.0
- Thot: 20.0
- isotopics: depletedUranium
- lengthOuter: 1.0
- mult: 1.0
- widthOuter: 1.0
- enriched_uranium fuel:
- shape: SolidRectangle
- material: Custom
- Tinput: 20.0
- Thot: 20.0
- isotopics: eUranium
- lengthOuter: 1.0
- mult: 1.0
- widthOuter: 2.0
- sodium: &component_eufuelblock_sodium
- shape: SolidRectangle
- material: Custom
- Tinput: 20.0
- Thot: 20.0
- isotopics: sodium
- lengthOuter: 1.0
- mult: 1.0
- widthOuter: 1.0
- iron: &component_eufuelblock_iron
- shape: SolidRectangle
- material: Custom
- Tinput: 20.0
- Thot: 20.0
- isotopics: structuralSteel
- lengthOuter: 1.0
- mult: 1.0
- widthOuter: 4.0
- latticeboundarycell: &component_eufuelblock_latticeboundarycell
- shape: Rectangle
- material: Void
- Tinput: 20.0
- Thot: 20.0
- lengthInner: 1.0
- lengthOuter: 1.0
- mult: 1.0
- widthInner: 8.0
- widthOuter: 8.0
- reversedeu fuel block: &block_reversedeufuelblock
- iron: *component_eufuelblock_iron
- sodium: *component_eufuelblock_sodium
- enriched_uranium fuel:
- shape: SolidRectangle
- material: Custom
- Tinput: 20.0
- Thot: 20.0
- isotopics: eUraniumHalf
- lengthOuter: 1.0
- mult: 1.0
- widthOuter: 2.0
- depleted_uranium: *component_eufuelblock_depleted_uranium
- latticeboundarycell: *component_eufuelblock_latticeboundarycell
- inheritseublocks: &block_inheritseublocks
- sodium:
- shape: SolidRectangle
- material: Custom
- Tinput: 20.0
- Thot: 20.0
- isotopics: sodium
- lengthOuter: 1.0
- mult: 1.0
- widthOuter: 0.5
- pu(fuel):
- shape: SolidRectangle
- material: Custom
- Tinput: 20.0
- Thot: 20.0
- isotopics: PuUranium
- lengthOuter: 1.0
- mult: 1.0
- widthOuter: 3.0
- iron:
- shape: SolidRectangle
- material: Custom
- Tinput: 20.0
- Thot: 20.0
- isotopics: structuralSteel
- lengthOuter: 1.0
- mult: 1.0
- widthOuter: 1.0
- pu(fuel)2:
- shape: SolidRectangle
- material: Custom
- Tinput: 20.0
- Thot: 20.0
- isotopics: PuUranium
- lengthOuter: 1.0
- mult: 1.0
- widthOuter: 0.5
- iron2:
- shape: SolidRectangle
- material: Custom
- Tinput: 20.0
- Thot: 20.0
- isotopics: structuralSteel
- lengthOuter: 1.0
- mult: 1.0
- widthOuter: 3.0
- latticeboundarycell: *component_eufuelblock_latticeboundarycell
- blanket fuel block: &block_blanketfuelblock
- depleted_uranium fuel 1: *component_eufuelblock_depleted_uranium
- sodium:
- shape: SolidRectangle
- material: Custom
- Tinput: 20.0
- Thot: 20.0
- isotopics: sodium
- lengthOuter: 1.0
- mult: 1.0
- widthOuter: 6.0
- depleted_uranium(fuel)2: *component_eufuelblock_depleted_uranium
- latticeboundarycell: *component_eufuelblock_latticeboundarycell
- reflectorblockinheritsblanket: &block_reflectorblockinheritsblanket
- iron:
- shape: SolidRectangle
- material: Custom
- Tinput: 20.0
- Thot: 20.0
- isotopics: structuralSteel
- lengthOuter: 1.0
- mult: 1.0
- widthOuter: 8.0
- latticeboundarycell: *component_eufuelblock_latticeboundarycell
+ eu fuel block: &block_eufuelblock
+ depleted_uranium: &component_eufuelblock_depleted_uranium
+ shape: SolidRectangle
+ material: Custom
+ Tinput: 20.0
+ Thot: 20.0
+ isotopics: depletedUranium
+ lengthOuter: 1.0
+ mult: 1.0
+ widthOuter: 1.0
+ enriched_uranium fuel:
+ shape: SolidRectangle
+ material: Custom
+ Tinput: 20.0
+ Thot: 20.0
+ isotopics: eUranium
+ lengthOuter: 1.0
+ mult: 1.0
+ widthOuter: 2.0
+ sodium: &component_eufuelblock_sodium
+ shape: SolidRectangle
+ material: Custom
+ Tinput: 20.0
+ Thot: 20.0
+ isotopics: sodium
+ lengthOuter: 1.0
+ mult: 1.0
+ widthOuter: 1.0
+ iron: &component_eufuelblock_iron
+ shape: SolidRectangle
+ material: Custom
+ Tinput: 20.0
+ Thot: 20.0
+ isotopics: structuralSteel
+ lengthOuter: 1.0
+ mult: 1.0
+ widthOuter: 4.0
+ latticeboundarycell: &component_eufuelblock_latticeboundarycell
+ shape: Rectangle
+ material: Void
+ Tinput: 20.0
+ Thot: 20.0
+ lengthInner: 1.0
+ lengthOuter: 1.0
+ mult: 1.0
+ widthInner: 8.0
+ widthOuter: 8.0
+ reversedeu fuel block: &block_reversedeufuelblock
+ iron: *component_eufuelblock_iron
+ sodium: *component_eufuelblock_sodium
+ enriched_uranium fuel:
+ shape: SolidRectangle
+ material: Custom
+ Tinput: 20.0
+ Thot: 20.0
+ isotopics: eUraniumHalf
+ lengthOuter: 1.0
+ mult: 1.0
+ widthOuter: 2.0
+ depleted_uranium: *component_eufuelblock_depleted_uranium
+ latticeboundarycell: *component_eufuelblock_latticeboundarycell
+ inheritseublocks: &block_inheritseublocks
+ sodium:
+ shape: SolidRectangle
+ material: Custom
+ Tinput: 20.0
+ Thot: 20.0
+ isotopics: sodium
+ lengthOuter: 1.0
+ mult: 1.0
+ widthOuter: 0.5
+ pu(fuel):
+ shape: SolidRectangle
+ material: Custom
+ Tinput: 20.0
+ Thot: 20.0
+ isotopics: PuUranium
+ lengthOuter: 1.0
+ mult: 1.0
+ widthOuter: 3.0
+ iron:
+ shape: SolidRectangle
+ material: Custom
+ Tinput: 20.0
+ Thot: 20.0
+ isotopics: structuralSteel
+ lengthOuter: 1.0
+ mult: 1.0
+ widthOuter: 1.0
+ pu(fuel)2:
+ shape: SolidRectangle
+ material: Custom
+ Tinput: 20.0
+ Thot: 20.0
+ isotopics: PuUranium
+ lengthOuter: 1.0
+ mult: 1.0
+ widthOuter: 0.5
+ iron2:
+ shape: SolidRectangle
+ material: Custom
+ Tinput: 20.0
+ Thot: 20.0
+ isotopics: structuralSteel
+ lengthOuter: 1.0
+ mult: 1.0
+ widthOuter: 3.0
+ latticeboundarycell: *component_eufuelblock_latticeboundarycell
+ blanket fuel block: &block_blanketfuelblock
+ depleted_uranium fuel 1: *component_eufuelblock_depleted_uranium
+ sodium:
+ shape: SolidRectangle
+ material: Custom
+ Tinput: 20.0
+ Thot: 20.0
+ isotopics: sodium
+ lengthOuter: 1.0
+ mult: 1.0
+ widthOuter: 6.0
+ depleted_uranium(fuel)2: *component_eufuelblock_depleted_uranium
+ latticeboundarycell: *component_eufuelblock_latticeboundarycell
+ reflectorblockinheritsblanket: &block_reflectorblockinheritsblanket
+ iron:
+ shape: SolidRectangle
+ material: Custom
+ Tinput: 20.0
+ Thot: 20.0
+ isotopics: structuralSteel
+ lengthOuter: 1.0
+ mult: 1.0
+ widthOuter: 8.0
+ latticeboundarycell: *component_eufuelblock_latticeboundarycell
assemblies:
- heights: &standard_heights [10.0, 30.0, 30.0, 15.0, 15.0, 30.0, 30.0, 10.0]
- axial mesh points: &standard_axial_mesh_points [1, 2, 2, 1, 1, 2, 2, 1]
- feed fuel:
- specifier: DrawerSet1
- blocks: [*block_reflectorblockinheritsblanket, *block_blanketfuelblock, *block_eufuelblock, *block_inheritseublocks, *block_reversedeufuelblock, *block_eufuelblock, *block_blanketfuelblock, *block_reflectorblockinheritsblanket]
- height: *standard_heights
- axial mesh points: *standard_axial_mesh_points
- xs types: &feed_fuel_xs_types [AZ, AZ, AA, AA, AA, AA, AZ, AZ]
- drawerset2:
- specifier: DrawerSet2
- blocks: [*block_reflectorblockinheritsblanket, *block_blanketfuelblock, *block_inheritseublocks, *block_eufuelblock, *block_reversedeufuelblock, *block_reversedeufuelblock, *block_blanketfuelblock,
- *block_reflectorblockinheritsblanket]
- height: *standard_heights
- axial mesh points: *standard_axial_mesh_points
- xs types: *feed_fuel_xs_types
- NotInCoreButGetBlocks:
- specifier: NotInCore
- blocks: [*block_reflectorblockinheritsblanket, *block_blanketfuelblock, *block_eufuelblock, *block_inheritseublocks, *block_reversedeufuelblock, *block_eufuelblock, *block_blanketfuelblock, *block_reflectorblockinheritsblanket]
- height: *standard_heights
- axial mesh points: *standard_axial_mesh_points
- xs types: [AZ, AZ, AC, AC, AC, AC, AZ, AZ]
+ heights: &standard_heights [10.0, 30.0, 30.0, 15.0, 15.0, 30.0, 30.0, 10.0]
+ axial mesh points: &standard_axial_mesh_points [1, 2, 2, 1, 1, 2, 2, 1]
+ feed fuel:
+ specifier: DrawerSet1
+ blocks:
+ [
+ *block_reflectorblockinheritsblanket,
+ *block_blanketfuelblock,
+ *block_eufuelblock,
+ *block_inheritseublocks,
+ *block_reversedeufuelblock,
+ *block_eufuelblock,
+ *block_blanketfuelblock,
+ *block_reflectorblockinheritsblanket,
+ ]
+ height: *standard_heights
+ axial mesh points: *standard_axial_mesh_points
+ xs types: &feed_fuel_xs_types [AZ, AZ, AA, AA, AA, AA, AZ, AZ]
+ drawerset2:
+ specifier: DrawerSet2
+ blocks:
+ [
+ *block_reflectorblockinheritsblanket,
+ *block_blanketfuelblock,
+ *block_inheritseublocks,
+ *block_eufuelblock,
+ *block_reversedeufuelblock,
+ *block_reversedeufuelblock,
+ *block_blanketfuelblock,
+ *block_reflectorblockinheritsblanket,
+ ]
+ height: *standard_heights
+ axial mesh points: *standard_axial_mesh_points
+ xs types: *feed_fuel_xs_types
+ NotInCoreButGetBlocks:
+ specifier: NotInCore
+ blocks:
+ [
+ *block_reflectorblockinheritsblanket,
+ *block_blanketfuelblock,
+ *block_eufuelblock,
+ *block_inheritseublocks,
+ *block_reversedeufuelblock,
+ *block_eufuelblock,
+ *block_blanketfuelblock,
+ *block_reflectorblockinheritsblanket,
+ ]
+ height: *standard_heights
+ axial mesh points: *standard_axial_mesh_points
+ xs types: [AZ, AZ, AC, AC, AC, AC, AZ, AZ]
diff --git a/armi/tests/Godiva-blueprints.yaml b/armi/tests/Godiva-blueprints.yaml
deleted file mode 100644
index 519725dd4..000000000
--- a/armi/tests/Godiva-blueprints.yaml
+++ /dev/null
@@ -1,485 +0,0 @@
-nuclide flags:
- PU237: {burn: false, xs: true, expandTo: []}
- PU240: {burn: false, xs: true, expandTo: []}
- PU241: {burn: false, xs: true, expandTo: []}
- AR: {burn: false, xs: true, expandTo: []}
- PA233: {burn: false, xs: true, expandTo: []}
- NP238: {burn: false, xs: true, expandTo: []}
- AR36: {burn: false, xs: true, expandTo: []}
- TH230: {burn: false, xs: true, expandTo: []}
- AR38: {burn: false, xs: true, expandTo: []}
- U238: {burn: false, xs: true, expandTo: []}
- U239: {burn: false, xs: true, expandTo: []}
- C: {burn: false, xs: true, expandTo: []}
- LFP35: {burn: false, xs: true, expandTo: []}
- U233: {burn: false, xs: true, expandTo: []}
- U234: {burn: false, xs: true, expandTo: []}
- U235: {burn: false, xs: true, expandTo: []}
- U236: {burn: false, xs: true, expandTo: []}
- U237: {burn: false, xs: true, expandTo: []}
- PU239: {burn: false, xs: true, expandTo: []}
- PU238: {burn: false, xs: true, expandTo: []}
- TH234: {burn: false, xs: true, expandTo: []}
- TH232: {burn: false, xs: true, expandTo: []}
- AR40: {burn: false, xs: true, expandTo: []}
- LFP39: {burn: false, xs: true, expandTo: []}
- DUMP2: {burn: false, xs: true, expandTo: []}
- LFP41: {burn: false, xs: true, expandTo: []}
- LFP40: {burn: false, xs: true, expandTo: []}
- PU242: {burn: false, xs: true, expandTo: []}
- PU236: {burn: false, xs: true, expandTo: []}
- U232: {burn: false, xs: true, expandTo: []}
- DUMP1: {burn: false, xs: true, expandTo: []}
- LFP38: {burn: false, xs: true, expandTo: []}
- AM243: {burn: false, xs: true, expandTo: []}
- PA231: {burn: false, xs: true, expandTo: []}
- CM244: {burn: false, xs: true, expandTo: []}
- CM242: {burn: false, xs: true, expandTo: []}
- AM242: {burn: false, xs: true, expandTo: []}
- CM245: {burn: false, xs: true, expandTo: []}
- CM243: {burn: false, xs: true, expandTo: []}
- CM246: {burn: false, xs: true, expandTo: []}
- CM247: {burn: false, xs: true, expandTo: []}
- O: {burn: false, xs: true, expandTo: [O16]}
- N: {burn: false, xs: true, expandTo: [N14]}
- ZR: {burn: false, xs: true, expandTo: []}
-custom isotopics: {}
-blocks: {}
-assemblies:
- heights:
- - 3.5
- - 3.5
- - 3.5
- - 3.5
- - 3.5
- axial mesh points:
- - 5
- - 5
- - 5
- - 5
- - 5
- assembly1_1:
- specifier: assembly1_1
- blocks:
- - name: block1_1_1
- Godiva:
- shape: RadialSegment
- material: UZr
- Tinput: 26.85
- Thot: 26.85
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 3.001
- inner_radius: 0.0
- mult: 0.9226919412612915
- compliment:
- shape: RadialSegment
- material: Air
- Tinput: 0.0
- Thot: 0.0
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 3.001
- inner_radius: 0.0
- mult: 0.0773080587387085
- - name: block1_1_2
- Godiva:
- shape: RadialSegment
- material: UZr
- Tinput: 26.85
- Thot: 26.85
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 3.001
- inner_radius: 0.0
- mult: 1.0
- - name: block1_1_3
- Godiva:
- shape: RadialSegment
- material: UZr
- Tinput: 26.85
- Thot: 26.85
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 3.001
- inner_radius: 0.0
- mult: 1.0
- - name: block1_1_4
- Godiva:
- shape: RadialSegment
- material: UZr
- Tinput: 26.85
- Thot: 26.85
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 3.001
- inner_radius: 0.0
- mult: 1.0
- - name: block1_1_5
- Godiva:
- shape: RadialSegment
- material: UZr
- Tinput: 26.85
- Thot: 26.85
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 3.001
- inner_radius: 0.0
- mult: 0.9271114468574524
- compliment:
- shape: RadialSegment
- material: Air
- Tinput: 0.0
- Thot: 0.0
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 3.001
- inner_radius: 0.0
- mult: 0.07288855314254761
- height:
- - 3.5
- - 3.5
- - 3.5
- - 3.5
- - 3.5
- axial mesh points:
- - 5
- - 5
- - 5
- - 5
- - 5
- radial mesh points: 2
- azimuthal mesh points: 7
- material modifications:
- U235_wt_frac:
- - 0.9371
- - 0.9371
- - 0.9371
- - 0.9371
- - 0.9371
- ZR_wt_frac:
- - 0.0
- - 0.0
- - 0.0
- - 0.0
- - 0.0
- xs types:
- - A
- - A
- - A
- - A
- - A
- assembly2_1:
- specifier: assembly2_1
- blocks:
- - name: block2_1_1
- Godiva:
- shape: RadialSegment
- material: UZr
- Tinput: 26.85
- Thot: 26.85
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 6.002
- inner_radius: 3.001
- mult: 0.5954532027244568
- compliment:
- shape: RadialSegment
- material: Air
- Tinput: 0.0
- Thot: 0.0
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 6.002
- inner_radius: 3.001
- mult: 0.4045467972755432
- - name: block2_1_2
- Godiva:
- shape: RadialSegment
- material: UZr
- Tinput: 26.85
- Thot: 26.85
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 6.002
- inner_radius: 3.001
- mult: 1.0
- - name: block2_1_3
- Godiva:
- shape: RadialSegment
- material: UZr
- Tinput: 26.85
- Thot: 26.85
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 6.002
- inner_radius: 3.001
- mult: 1.0
- - name: block2_1_4
- Godiva:
- shape: RadialSegment
- material: UZr
- Tinput: 26.85
- Thot: 26.85
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 6.002
- inner_radius: 3.001
- mult: 1.0
- - name: block2_1_5
- Godiva:
- shape: RadialSegment
- material: UZr
- Tinput: 26.85
- Thot: 26.85
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 6.002
- inner_radius: 3.001
- mult: 0.5924441814422607
- compliment:
- shape: RadialSegment
- material: Air
- Tinput: 0.0
- Thot: 0.0
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 6.002
- inner_radius: 3.001
- mult: 0.40755581855773926
- height:
- - 3.5
- - 3.5
- - 3.5
- - 3.5
- - 3.5
- axial mesh points:
- - 5
- - 5
- - 5
- - 5
- - 5
- radial mesh points: 2
- azimuthal mesh points: 7
- material modifications:
- U235_wt_frac:
- - 0.9371
- - 0.9371
- - 0.9371
- - 0.9371
- - 0.9371
- ZR_wt_frac:
- - 0.0
- - 0.0
- - 0.0
- - 0.0
- - 0.0
- xs types:
- - A
- - A
- - A
- - A
- - A
- assembly3_1:
- specifier: assembly3_1
- blocks:
- - name: block3_1_1
- Godiva:
- shape: RadialSegment
- material: UZr
- Tinput: 26.85
- Thot: 26.85
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 9.0
- inner_radius: 6.002
- mult: 0.046154800802469254
- compliment:
- shape: RadialSegment
- material: Air
- Tinput: 0.0
- Thot: 0.0
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 9.0
- inner_radius: 6.002
- mult: 0.9538451991975307
- - name: block3_1_2
- Godiva:
- shape: RadialSegment
- material: UZr
- Tinput: 26.85
- Thot: 26.85
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 9.0
- inner_radius: 6.002
- mult: 0.6035306453704834
- compliment:
- shape: RadialSegment
- material: Air
- Tinput: 0.0
- Thot: 0.0
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 9.0
- inner_radius: 6.002
- mult: 0.3964693546295166
- - name: block3_1_3
- Godiva:
- shape: RadialSegment
- material: UZr
- Tinput: 26.85
- Thot: 26.85
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 9.0
- inner_radius: 6.002
- mult: 0.8756284713745117
- compliment:
- shape: RadialSegment
- material: Air
- Tinput: 0.0
- Thot: 0.0
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 9.0
- inner_radius: 6.002
- mult: 0.12437152862548828
- - name: block3_1_4
- Godiva:
- shape: RadialSegment
- material: UZr
- Tinput: 26.85
- Thot: 26.85
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 9.0
- inner_radius: 6.002
- mult: 0.5993080139160156
- compliment:
- shape: RadialSegment
- material: Air
- Tinput: 0.0
- Thot: 0.0
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 9.0
- inner_radius: 6.002
- mult: 0.4006919860839844
- - name: block3_1_5
- Godiva:
- shape: RadialSegment
- material: UZr
- Tinput: 26.85
- Thot: 26.85
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 9.0
- inner_radius: 6.002
- mult: 0.04680449143052101
- compliment:
- shape: RadialSegment
- material: Air
- Tinput: 0.0
- Thot: 0.0
- outer_theta: 0.7853981633974483
- height: 3.5
- inner_theta: 0.0
- outer_radius: 9.0
- inner_radius: 6.002
- mult: 0.953195508569479
- height:
- - 3.5
- - 3.5
- - 3.5
- - 3.5
- - 3.5
- axial mesh points:
- - 5
- - 5
- - 5
- - 5
- - 5
- radial mesh points: 2
- azimuthal mesh points: 7
- material modifications:
- U235_wt_frac:
- - 0.9371
- - 0.9371
- - 0.9371
- - 0.9371
- - 0.9371
- ZR_wt_frac:
- - 0.0
- - 0.0
- - 0.0
- - 0.0
- - 0.0
- xs types:
- - A
- - A
- - A
- - A
- - A
-systems:
- core:
- grid name: core
- origin:
- x: 0.0
- y: 0.0
- z: 0.0
-grids:
- core:
- geom: thetarz
- lattice map:
- grid bounds:
- r:
- - 0.0
- - 3.001
- - 6.002
- - 9.0
- theta:
- - 0.0
- - 0.7853981633974483
- z:
- - -8.75
- - -5.25
- - -1.7500000000000002
- - 1.7500000000000002
- - 5.25
- - 8.75
- symmetry: eighth periodic
- grid contents:
- ? - 0
- - 0
- : assembly1_1
- ? - 0
- - 1
- : assembly2_1
- ? - 0
- - 2
- : assembly3_1
diff --git a/armi/tests/ThRZSettings.yaml b/armi/tests/ThRZSettings.yaml
index 8586ee1b6..f9ef807ac 100644
--- a/armi/tests/ThRZSettings.yaml
+++ b/armi/tests/ThRZSettings.yaml
@@ -1,15 +1,19 @@
metadata:
version: uncontrolled
settings:
+# global
branchVerbosity: info
burnSteps: 0
- comment: 'Revised benchmark '
- db: false
- economics: false
- genXS: Neutron
+ comment: "Revised benchmark "
geomFile: ThRZGeom.xml
- groupStructure: ARMI45
loadingFile: ThRZloading.yaml
numProcessors: 12
outputFileExtension: png
power: 1000000.0
+
+# database
+ db: false
+
+# neutronics
+ genXS: Neutron
+ groupStructure: ARMI45
diff --git a/armi/tests/ThRZloading.yaml b/armi/tests/ThRZloading.yaml
index 7da85bc95..665a5d347 100644
--- a/armi/tests/ThRZloading.yaml
+++ b/armi/tests/ThRZloading.yaml
@@ -1,124 +1,146 @@
custom isotopics: {}
blocks:
- fuel: &block_fuel
- bond:
- shape: Circle
- material: Sodium
- Tinput: 450.0
- Thot: 450.0
- id: fuel.od
- mult: fuel.mult
- od: clad.id
- clad:
- shape: Circle
- material: HT9
- Tinput: 470.0
- Thot: 470.0
- id: 1.0
- mult: fuel.mult
- od: 1.09
- coolant: &component_fuel_coolant
- shape: UnshapedComponent
- material: Sodium
- Tinput: 450.0
- Thot: 450.0
- area: 66.0
- duct: &component_fuel_duct
- shape: Hexagon
- material: HT9
- Tinput: 450.0
- Thot: 450.0
- ip: 16.0
- mult: 1.0
- op: 16.6
- fuel:
- shape: Circle
- material: UZr
- Tinput: 600.0
- Thot: 600.0
- id: 0.0
- mult: 169.0
- od: 0.87
- intercoolant: &component_fuel_intercoolant
- shape: Hexagon
- material: Sodium
- Tinput: 450.0
- Thot: 450.0
- ip: duct.op
- mult: 1.0
- op: 16.75
- wire:
- shape: Helix
- material: HT9
- Tinput: 450.0
- Thot: 450.0
- axialPitch: 30.
- helixDiameter: 1.2
- id: 0.0
- mult: fuel.mult
- od: 0.1
- reflector: &block_reflector
- clad:
- shape: Circle
- material: HT9
- Tinput: 470.0
- Thot: 470.0
- id: 1.0
- mult: reflector.mult
- od: 1.09
- coolant: *component_fuel_coolant
- duct: *component_fuel_duct
- intercoolant: *component_fuel_intercoolant
- reflector:
- shape: Circle
- material: HT9
- Tinput: 600.0
- Thot: 600.0
- id: 0.0
- mult: 169.0
- od: 1.0
- wire:
- shape: Helix
- material: HT9
- Tinput: 450.0
- Thot: 450.0
- axialPitch: 30.
- helixDiameter: 1.2
- id: 0.0
- mult: reflector.mult
- od: 0.1
-assemblies:
- heights: &standard_heights [10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0]
- axial mesh points: &standard_axial_mesh_points [1, 1, 1, 1, 1, 1, 1, 1, 1]
+ fuel: &block_fuel
+ bond:
+ shape: Circle
+ material: Sodium
+ Tinput: 450.0
+ Thot: 450.0
+ id: fuel.od
+ mult: fuel.mult
+ od: clad.id
+ clad:
+ shape: Circle
+ material: HT9
+ Tinput: 470.0
+ Thot: 470.0
+ id: 1.0
+ mult: fuel.mult
+ od: 1.09
+ coolant: &component_fuel_coolant
+ shape: UnshapedComponent
+ material: Sodium
+ Tinput: 450.0
+ Thot: 450.0
+ area: 66.0
+ duct: &component_fuel_duct
+ shape: Hexagon
+ material: HT9
+ Tinput: 450.0
+ Thot: 450.0
+ ip: 16.0
+ mult: 1.0
+ op: 16.6
fuel:
- specifier: IC
- blocks: &fuel_blocks [*block_reflector, *block_fuel, *block_fuel, *block_fuel, *block_fuel, *block_fuel, *block_fuel, *block_fuel, *block_reflector]
- height: *standard_heights
- axial mesh points: *standard_axial_mesh_points
- radial mesh points: 4
- azimuthal mesh points: 4
- material modifications:
- U235_wt_frac: ['', 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, '']
- ZR_wt_frac: &fuel_zr_wt_frac ['', 0.06, 0.06, 0.06, 0.06, 0.06, 0.06, 0.06, '']
- xs types: &fuel_xs_types [A, A, A, A, A, A, A, A, A]
- blanket fuel:
- specifier: MC
- blocks: *fuel_blocks
- height: *standard_heights
- axial mesh points: *standard_axial_mesh_points
- radial mesh points: 4
- azimuthal mesh points: 4
- material modifications:
- U235_wt_frac: ['', 0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003, '']
- ZR_wt_frac: *fuel_zr_wt_frac
- xs types: *fuel_xs_types
+ shape: Circle
+ material: UZr
+ Tinput: 600.0
+ Thot: 600.0
+ id: 0.0
+ mult: 169.0
+ od: 0.87
+ intercoolant: &component_fuel_intercoolant
+ shape: Hexagon
+ material: Sodium
+ Tinput: 450.0
+ Thot: 450.0
+ ip: duct.op
+ mult: 1.0
+ op: 16.75
+ wire:
+ shape: Helix
+ material: HT9
+ Tinput: 450.0
+ Thot: 450.0
+ axialPitch: 30.
+ helixDiameter: 1.2
+ id: 0.0
+ mult: fuel.mult
+ od: 0.1
+ reflector: &block_reflector
+ clad:
+ shape: Circle
+ material: HT9
+ Tinput: 470.0
+ Thot: 470.0
+ id: 1.0
+ mult: reflector.mult
+ od: 1.09
+ coolant: *component_fuel_coolant
+ duct: *component_fuel_duct
+ intercoolant: *component_fuel_intercoolant
reflector:
- specifier: RR
- blocks: [*block_reflector, *block_reflector, *block_reflector, *block_reflector, *block_reflector, *block_reflector, *block_reflector, *block_reflector, *block_reflector]
- height: *standard_heights
- axial mesh points: *standard_axial_mesh_points
- radial mesh points: 4
- azimuthal mesh points: 4
- xs types: *fuel_xs_types
-
-
+ shape: Circle
+ material: HT9
+ Tinput: 600.0
+ Thot: 600.0
+ id: 0.0
+ mult: 169.0
+ od: 1.0
+ wire:
+ shape: Helix
+ material: HT9
+ Tinput: 450.0
+ Thot: 450.0
+ axialPitch: 30.
+ helixDiameter: 1.2
+ id: 0.0
+ mult: reflector.mult
+ od: 0.1
+assemblies:
+ heights:
+ &standard_heights [10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0]
+ axial mesh points: &standard_axial_mesh_points [1, 1, 1, 1, 1, 1, 1, 1, 1]
+ fuel:
+ specifier: IC
+ blocks:
+ &fuel_blocks [
+ *block_reflector,
+ *block_fuel,
+ *block_fuel,
+ *block_fuel,
+ *block_fuel,
+ *block_fuel,
+ *block_fuel,
+ *block_fuel,
+ *block_reflector,
+ ]
+ height: *standard_heights
+ axial mesh points: *standard_axial_mesh_points
+ radial mesh points: 4
+ azimuthal mesh points: 4
+ material modifications:
+ U235_wt_frac: ["", 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, ""]
+ ZR_wt_frac:
+ &fuel_zr_wt_frac ["", 0.06, 0.06, 0.06, 0.06, 0.06, 0.06, 0.06, ""]
+ xs types: &fuel_xs_types [A, A, A, A, A, A, A, A, A]
+ blanket fuel:
+ specifier: MC
+ blocks: *fuel_blocks
+ height: *standard_heights
+ axial mesh points: *standard_axial_mesh_points
+ radial mesh points: 4
+ azimuthal mesh points: 4
+ material modifications:
+ U235_wt_frac: ["", 0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003, ""]
+ ZR_wt_frac: *fuel_zr_wt_frac
+ xs types: *fuel_xs_types
+ reflector:
+ specifier: RR
+ blocks:
+ [
+ *block_reflector,
+ *block_reflector,
+ *block_reflector,
+ *block_reflector,
+ *block_reflector,
+ *block_reflector,
+ *block_reflector,
+ *block_reflector,
+ *block_reflector,
+ ]
+ height: *standard_heights
+ axial mesh points: *standard_axial_mesh_points
+ radial mesh points: 4
+ azimuthal mesh points: 4
+ xs types: *fuel_xs_types
diff --git a/armi/tests/armiRun.yaml b/armi/tests/armiRun.yaml
index f28bef698..c8b8d2e06 100644
--- a/armi/tests/armiRun.yaml
+++ b/armi/tests/armiRun.yaml
@@ -1,13 +1,31 @@
settings:
+# global
availabilityFactor: 1
beta: 0.003454
- BOL: true
branchVerbosity: debug
buGroups:
- 100
burnSteps: 2
- clusterExclusive: false
comment: Simple test input.
+ cycleLength: 2000.0
+ detailAssemLocationsBOL:
+ - 002-001
+ freshFeedType: igniter fuel
+ loadingFile: refSmallReactor.yaml
+ moduleVerbosity:
+ armi.reactor.reactors: info
+ nCycles: 6
+ outputFileExtension: png
+ power: 100000000.0
+ smallRun: true
+ startCycle: 1
+ startNode: 2
+ targetK: 1.002
+ verbosity: extra
+ versions:
+ armi: uncontrolled
+
+# cross section
crossSectionControl:
DA:
geometry: 0D
@@ -33,40 +51,20 @@ settings:
- gap
numInternalRings: 1
numExternalRings: 1
- cycleLength: 2000.0
+
+# database
db: false
- detailAssemLocationsBOL:
- - 002-001
- economics: false
- epsBurnTime: 0.001
- epsFSAvg: 1e-06
- epsFSPoint: 1e-06
- eqRingSchedule:
- - 13
- - 1
- freshFeedType: igniter fuel
+
+# fuel cycle
fuelHandlerName: EquilibriumShuffler
jumpRingNum: 9
- loadingFile: refSmallReactor.yaml
- startCycle: 1
- startNode: 2
+ shuffleLogic: refSmallReactorShuffleLogic.py
+
+# neutronics
+ epsFSAvg: 1e-06
+ epsFSPoint: 1e-06
loadPadElevation: 200.0
- max2SigmaCladIDT: 630.0
+
+# report
genReports: false
- maxFlowZones: 12
- maxRegionDensityIterations: 5
- moduleVerbosity:
- armi.reactor.reactors: info
- nCycles: 6
- nodeGroup: OnlineNodes,TP
- outputFileExtension: png
- percentNaReduction: 10.0
- power: 100000000.0
- shuffleLogic: refSmallReactorShuffleLogic.py
- smallRun: true
summarizeAssemDesign: false
- targetK: 1.002
- transientForSensitivity: ''
- verbosity: extra
- versions:
- armi: uncontrolled
diff --git a/armi/tests/detailedAxialExpansion/armiRun.yaml b/armi/tests/detailedAxialExpansion/armiRun.yaml
index feda7a6c8..c3d57f799 100644
--- a/armi/tests/detailedAxialExpansion/armiRun.yaml
+++ b/armi/tests/detailedAxialExpansion/armiRun.yaml
@@ -1,12 +1,29 @@
settings:
- axialExpansion: true
- detailedAxialExpansion: true
+# global
beta: 0.003454
branchVerbosity: debug
buGroups:
- 100
burnSteps: 2
comment: Simple test input with detailed axial expansion.
+ cycleLength: 2000.0
+ detailAssemLocationsBOL:
+ - 002-001
+ detailedAxialExpansion: true
+ freshFeedType: igniter fuel
+ loadingFile: refSmallReactor.yaml
+ moduleVerbosity:
+ armi.reactor.reactors: info
+ nCycles: 6
+ outputFileExtension: png
+ power: 100000000.0
+ startNode: 1
+ targetK: 1.002
+ verbosity: extra
+ versions:
+ armi: uncontrolled
+
+# cross section
crossSectionControl:
DA:
geometry: 0D
@@ -19,26 +36,22 @@ settings:
XA:
xsFileLocation:
- ISOXA
- cycleLength: 2000.0
+
+# database
db: false
- detailAssemLocationsBOL:
- - 002-001
- epsFSAvg: 1e-06
- epsFSPoint: 1e-06
- freshFeedType: igniter fuel
+
+# fuel cycle
fuelHandlerName: EquilibriumShuffler
jumpRingNum: 9
- loadingFile: refSmallReactor.yaml
- startNode: 1
+
+# fuel performance
+ axialExpansion: true
+
+# neutronics
+ epsFSAvg: 1e-06
+ epsFSPoint: 1e-06
loadPadElevation: 162.5
+
+# report
genReports: false
- moduleVerbosity:
- armi.reactor.reactors: info
- nCycles: 6
- outputFileExtension: png
- power: 100000000.0
summarizeAssemDesign: false
- targetK: 1.002
- verbosity: extra
- versions:
- armi: uncontrolled
diff --git a/armi/tests/detailedAxialExpansion/refSmallReactorBase.yaml b/armi/tests/detailedAxialExpansion/refSmallReactorBase.yaml
index 623b8b8b9..65fb02d81 100644
--- a/armi/tests/detailedAxialExpansion/refSmallReactorBase.yaml
+++ b/armi/tests/detailedAxialExpansion/refSmallReactorBase.yaml
@@ -41,7 +41,7 @@ blocks:
ip: grid.op
mult: 1.0
op: 16.75
-
+
duct: &block_duct
coolant: *component_coolant
duct: &component_duct
@@ -61,7 +61,7 @@ blocks:
mult: 1.0
op: 16.75
- SodiumBlock : &block_dummy
+ SodiumBlock: &block_dummy
flags: dummy
coolant:
shape: Hexagon
@@ -109,11 +109,11 @@ blocks:
id: 0.0
mult: shield.mult
od: 0.10056
- coolant: *component_coolant
+ coolant: *component_coolant
duct: *component_duct
intercoolant: *component_intercoolant
- fuel: &block_fuel
+ fuel: &block_fuel
fuel: &component_fuel_fuel
shape: Circle
material: UZr
@@ -151,7 +151,7 @@ blocks:
coolant: *component_coolant
duct: *component_duct
intercoolant: *component_intercoolant
-
+
plenum: &block_plenum
gap: &component_plenum_gap
shape: Circle
@@ -182,8 +182,8 @@ blocks:
coolant: *component_coolant
duct: *component_duct
intercoolant: *component_intercoolant
-
- aclp plenum : &block_aclp
+
+ aclp plenum: &block_aclp
gap: *component_plenum_gap
clad: *component_plenum_clad
wire: *component_plenum_wire
@@ -232,18 +232,18 @@ blocks:
coolant: *component_coolant
duct: *component_duct
intercoolant: *component_intercoolant
-
+
lta fuel a: &block_lta1_fuel
fuel: *component_fuel_fuel
bond: *component_fuel_bond
liner2: *component_fuel2_liner2
liner1: *component_fuel2_liner1
- clad: *component_fuel_clad
+ clad: *component_fuel_clad
wire: *component_fuel_wire
coolant: *component_coolant
duct: *component_duct
intercoolant: *component_intercoolant
-
+
lta fuel b: &block_lta2_fuel
fuel:
shape: Circle
@@ -257,12 +257,12 @@ blocks:
bond: *component_fuel_bond
liner2: *component_fuel2_liner2
liner1: *component_fuel2_liner1
- clad: *component_fuel_clad
+ clad: *component_fuel_clad
wire: *component_fuel_wire
coolant: *component_coolant
duct: *component_duct
intercoolant: *component_intercoolant
-
+
annular fuel gap: &block_fuel3
gap1:
shape: Circle
@@ -333,7 +333,7 @@ blocks:
coolant: *component_coolant
duct: *component_duct
intercoolant: *component_intercoolant
-
+
## ------------------------------------------------------------------------------------
## control
moveable duct: &block_ctrl_duct
@@ -389,7 +389,7 @@ blocks:
helixDiameter: 1.771
id: 0.0
mult: clad.mult
- od: 0.085
+ od: 0.085
innerDuct: &component_control_innerDuct
shape: Hexagon
material: HT9
@@ -401,7 +401,7 @@ blocks:
duct: *component_control_duct
coolant: *component_coolant
intercoolant: *component_control_intercoolant
-
+
moveable plenum: &block_control_plenum
gap: *component_control_gap
clad: *component_control_clad
@@ -495,7 +495,7 @@ assemblies:
xs types: *igniter_fuel_xs_types
lead test fuel:
specifier: LA
- blocks: [*block_grid_plate, *block_fuel_axial_shield, *block_lta1_fuel, *block_lta1_fuel, *block_lta1_fuel, *block_plenum, *block_aclp, *block_plenum, *block_duct, *block_dummy]
+ blocks: [*block_grid_plate, *block_fuel_axial_shield, *block_lta1_fuel, *block_lta1_fuel, *block_lta1_fuel, *block_plenum, *block_aclp, *block_plenum, *block_duct, *block_dummy]
height: *highOffset_height
axial mesh points: *standard_axial_mesh_points
material modifications:
diff --git a/armi/tests/godiva/godiva-blueprints.yaml b/armi/tests/godiva/godiva-blueprints.yaml
new file mode 100644
index 000000000..26d508d6a
--- /dev/null
+++ b/armi/tests/godiva/godiva-blueprints.yaml
@@ -0,0 +1,485 @@
+nuclide flags:
+ PU237: {burn: false, xs: true, expandTo: []}
+ PU240: {burn: false, xs: true, expandTo: []}
+ PU241: {burn: false, xs: true, expandTo: []}
+ AR: {burn: false, xs: true, expandTo: []}
+ PA233: {burn: false, xs: true, expandTo: []}
+ NP238: {burn: false, xs: true, expandTo: []}
+ AR36: {burn: false, xs: true, expandTo: []}
+ TH230: {burn: false, xs: true, expandTo: []}
+ AR38: {burn: false, xs: true, expandTo: []}
+ U238: {burn: false, xs: true, expandTo: []}
+ U239: {burn: false, xs: true, expandTo: []}
+ C: {burn: false, xs: true, expandTo: []}
+ LFP35: {burn: false, xs: true, expandTo: []}
+ U233: {burn: false, xs: true, expandTo: []}
+ U234: {burn: false, xs: true, expandTo: []}
+ U235: {burn: false, xs: true, expandTo: []}
+ U236: {burn: false, xs: true, expandTo: []}
+ U237: {burn: false, xs: true, expandTo: []}
+ PU239: {burn: false, xs: true, expandTo: []}
+ PU238: {burn: false, xs: true, expandTo: []}
+ TH234: {burn: false, xs: true, expandTo: []}
+ TH232: {burn: false, xs: true, expandTo: []}
+ AR40: {burn: false, xs: true, expandTo: []}
+ LFP39: {burn: false, xs: true, expandTo: []}
+ DUMP2: {burn: false, xs: true, expandTo: []}
+ LFP41: {burn: false, xs: true, expandTo: []}
+ LFP40: {burn: false, xs: true, expandTo: []}
+ PU242: {burn: false, xs: true, expandTo: []}
+ PU236: {burn: false, xs: true, expandTo: []}
+ U232: {burn: false, xs: true, expandTo: []}
+ DUMP1: {burn: false, xs: true, expandTo: []}
+ LFP38: {burn: false, xs: true, expandTo: []}
+ AM243: {burn: false, xs: true, expandTo: []}
+ PA231: {burn: false, xs: true, expandTo: []}
+ CM244: {burn: false, xs: true, expandTo: []}
+ CM242: {burn: false, xs: true, expandTo: []}
+ AM242: {burn: false, xs: true, expandTo: []}
+ CM245: {burn: false, xs: true, expandTo: []}
+ CM243: {burn: false, xs: true, expandTo: []}
+ CM246: {burn: false, xs: true, expandTo: []}
+ CM247: {burn: false, xs: true, expandTo: []}
+ O: {burn: false, xs: true, expandTo: [O16]}
+ N: {burn: false, xs: true, expandTo: [N14]}
+ ZR: {burn: false, xs: true, expandTo: []}
+custom isotopics: {}
+blocks: {}
+assemblies:
+ heights:
+ - 3.5
+ - 3.5
+ - 3.5
+ - 3.5
+ - 3.5
+ axial mesh points:
+ - 5
+ - 5
+ - 5
+ - 5
+ - 5
+ assembly1_1:
+ specifier: assembly1_1
+ blocks:
+ - name: block1_1_1
+ godiva:
+ shape: RadialSegment
+ material: UZr
+ Tinput: 26.85
+ Thot: 26.85
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 3.001
+ inner_radius: 0.0
+ mult: 0.9226919412612915
+ compliment:
+ shape: RadialSegment
+ material: Air
+ Tinput: 0.0
+ Thot: 0.0
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 3.001
+ inner_radius: 0.0
+ mult: 0.0773080587387085
+ - name: block1_1_2
+ godiva:
+ shape: RadialSegment
+ material: UZr
+ Tinput: 26.85
+ Thot: 26.85
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 3.001
+ inner_radius: 0.0
+ mult: 1.0
+ - name: block1_1_3
+ godiva:
+ shape: RadialSegment
+ material: UZr
+ Tinput: 26.85
+ Thot: 26.85
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 3.001
+ inner_radius: 0.0
+ mult: 1.0
+ - name: block1_1_4
+ godiva:
+ shape: RadialSegment
+ material: UZr
+ Tinput: 26.85
+ Thot: 26.85
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 3.001
+ inner_radius: 0.0
+ mult: 1.0
+ - name: block1_1_5
+ godiva:
+ shape: RadialSegment
+ material: UZr
+ Tinput: 26.85
+ Thot: 26.85
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 3.001
+ inner_radius: 0.0
+ mult: 0.9271114468574524
+ compliment:
+ shape: RadialSegment
+ material: Air
+ Tinput: 0.0
+ Thot: 0.0
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 3.001
+ inner_radius: 0.0
+ mult: 0.07288855314254761
+ height:
+ - 3.5
+ - 3.5
+ - 3.5
+ - 3.5
+ - 3.5
+ axial mesh points:
+ - 5
+ - 5
+ - 5
+ - 5
+ - 5
+ radial mesh points: 2
+ azimuthal mesh points: 7
+ material modifications:
+ U235_wt_frac:
+ - 0.9371
+ - 0.9371
+ - 0.9371
+ - 0.9371
+ - 0.9371
+ ZR_wt_frac:
+ - 0.0
+ - 0.0
+ - 0.0
+ - 0.0
+ - 0.0
+ xs types:
+ - A
+ - A
+ - A
+ - A
+ - A
+ assembly2_1:
+ specifier: assembly2_1
+ blocks:
+ - name: block2_1_1
+ godiva:
+ shape: RadialSegment
+ material: UZr
+ Tinput: 26.85
+ Thot: 26.85
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 6.002
+ inner_radius: 3.001
+ mult: 0.5954532027244568
+ compliment:
+ shape: RadialSegment
+ material: Air
+ Tinput: 0.0
+ Thot: 0.0
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 6.002
+ inner_radius: 3.001
+ mult: 0.4045467972755432
+ - name: block2_1_2
+ godiva:
+ shape: RadialSegment
+ material: UZr
+ Tinput: 26.85
+ Thot: 26.85
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 6.002
+ inner_radius: 3.001
+ mult: 1.0
+ - name: block2_1_3
+ godiva:
+ shape: RadialSegment
+ material: UZr
+ Tinput: 26.85
+ Thot: 26.85
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 6.002
+ inner_radius: 3.001
+ mult: 1.0
+ - name: block2_1_4
+ godiva:
+ shape: RadialSegment
+ material: UZr
+ Tinput: 26.85
+ Thot: 26.85
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 6.002
+ inner_radius: 3.001
+ mult: 1.0
+ - name: block2_1_5
+ godiva:
+ shape: RadialSegment
+ material: UZr
+ Tinput: 26.85
+ Thot: 26.85
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 6.002
+ inner_radius: 3.001
+ mult: 0.5924441814422607
+ compliment:
+ shape: RadialSegment
+ material: Air
+ Tinput: 0.0
+ Thot: 0.0
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 6.002
+ inner_radius: 3.001
+ mult: 0.40755581855773926
+ height:
+ - 3.5
+ - 3.5
+ - 3.5
+ - 3.5
+ - 3.5
+ axial mesh points:
+ - 5
+ - 5
+ - 5
+ - 5
+ - 5
+ radial mesh points: 2
+ azimuthal mesh points: 7
+ material modifications:
+ U235_wt_frac:
+ - 0.9371
+ - 0.9371
+ - 0.9371
+ - 0.9371
+ - 0.9371
+ ZR_wt_frac:
+ - 0.0
+ - 0.0
+ - 0.0
+ - 0.0
+ - 0.0
+ xs types:
+ - A
+ - A
+ - A
+ - A
+ - A
+ assembly3_1:
+ specifier: assembly3_1
+ blocks:
+ - name: block3_1_1
+ godiva:
+ shape: RadialSegment
+ material: UZr
+ Tinput: 26.85
+ Thot: 26.85
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 9.0
+ inner_radius: 6.002
+ mult: 0.046154800802469254
+ compliment:
+ shape: RadialSegment
+ material: Air
+ Tinput: 0.0
+ Thot: 0.0
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 9.0
+ inner_radius: 6.002
+ mult: 0.9538451991975307
+ - name: block3_1_2
+ godiva:
+ shape: RadialSegment
+ material: UZr
+ Tinput: 26.85
+ Thot: 26.85
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 9.0
+ inner_radius: 6.002
+ mult: 0.6035306453704834
+ compliment:
+ shape: RadialSegment
+ material: Air
+ Tinput: 0.0
+ Thot: 0.0
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 9.0
+ inner_radius: 6.002
+ mult: 0.3964693546295166
+ - name: block3_1_3
+ godiva:
+ shape: RadialSegment
+ material: UZr
+ Tinput: 26.85
+ Thot: 26.85
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 9.0
+ inner_radius: 6.002
+ mult: 0.8756284713745117
+ compliment:
+ shape: RadialSegment
+ material: Air
+ Tinput: 0.0
+ Thot: 0.0
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 9.0
+ inner_radius: 6.002
+ mult: 0.12437152862548828
+ - name: block3_1_4
+ godiva:
+ shape: RadialSegment
+ material: UZr
+ Tinput: 26.85
+ Thot: 26.85
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 9.0
+ inner_radius: 6.002
+ mult: 0.5993080139160156
+ compliment:
+ shape: RadialSegment
+ material: Air
+ Tinput: 0.0
+ Thot: 0.0
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 9.0
+ inner_radius: 6.002
+ mult: 0.4006919860839844
+ - name: block3_1_5
+ godiva:
+ shape: RadialSegment
+ material: UZr
+ Tinput: 26.85
+ Thot: 26.85
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 9.0
+ inner_radius: 6.002
+ mult: 0.04680449143052101
+ compliment:
+ shape: RadialSegment
+ material: Air
+ Tinput: 0.0
+ Thot: 0.0
+ outer_theta: 0.7853981633974483
+ height: 3.5
+ inner_theta: 0.0
+ outer_radius: 9.0
+ inner_radius: 6.002
+ mult: 0.953195508569479
+ height:
+ - 3.5
+ - 3.5
+ - 3.5
+ - 3.5
+ - 3.5
+ axial mesh points:
+ - 5
+ - 5
+ - 5
+ - 5
+ - 5
+ radial mesh points: 2
+ azimuthal mesh points: 7
+ material modifications:
+ U235_wt_frac:
+ - 0.9371
+ - 0.9371
+ - 0.9371
+ - 0.9371
+ - 0.9371
+ ZR_wt_frac:
+ - 0.0
+ - 0.0
+ - 0.0
+ - 0.0
+ - 0.0
+ xs types:
+ - A
+ - A
+ - A
+ - A
+ - A
+systems:
+ core:
+ grid name: core
+ origin:
+ x: 0.0
+ y: 0.0
+ z: 0.0
+grids:
+ core:
+ geom: thetarz
+ lattice map:
+ grid bounds:
+ r:
+ - 0.0
+ - 3.001
+ - 6.002
+ - 9.0
+ theta:
+ - 0.0
+ - 0.7853981633974483
+ z:
+ - -8.75
+ - -5.25
+ - -1.7500000000000002
+ - 1.7500000000000002
+ - 5.25
+ - 8.75
+ symmetry: eighth periodic
+ grid contents:
+ ? - 0
+ - 0
+ : assembly1_1
+ ? - 0
+ - 1
+ : assembly2_1
+ ? - 0
+ - 2
+ : assembly3_1
diff --git a/armi/tests/Godiva.armi.unittest.yaml b/armi/tests/godiva/godiva.armi.unittest.yaml
similarity index 93%
rename from armi/tests/Godiva.armi.unittest.yaml
rename to armi/tests/godiva/godiva.armi.unittest.yaml
index 8aa29ea24..3ffa71b43 100644
--- a/armi/tests/Godiva.armi.unittest.yaml
+++ b/armi/tests/godiva/godiva.armi.unittest.yaml
@@ -12,7 +12,7 @@ settings:
genReports: false
genXS: Neutron
groupStructure: ARMI45
- loadingFile: Godiva-blueprints.yaml
+ loadingFile: godiva-blueprints.yaml
neutronicsKernel: DIF3D-FD
neutronicsOutputsToSave: All
neutronicsType: both
diff --git a/armi/tests/refSmallCartesian.yaml b/armi/tests/refSmallCartesian.yaml
index 18066c5bf..fae2134ee 100644
--- a/armi/tests/refSmallCartesian.yaml
+++ b/armi/tests/refSmallCartesian.yaml
@@ -1,309 +1,337 @@
custom isotopics:
- MOX:
- input format: number densities
- AM241: 2.3606e-05
- PU238: 3.7387e-06
- PU239: 0.00286038
- PU240: 0.000712945
- PU241: 9.82312e-05
- PU242: 2.02221e-05
- U235: 0.00405533
- U238: 0.0134125
+ MOX:
+ input format: number densities
+ AM241: 2.3606e-05
+ PU238: 3.7387e-06
+ PU239: 0.00286038
+ PU240: 0.000712945
+ PU241: 9.82312e-05
+ PU242: 2.02221e-05
+ U235: 0.00405533
+ U238: 0.0134125
blocks:
- fuel: &block_fuel
- fuel:
- shape: Circle
- material: UZr
- Tinput: 25.0
- Thot: 600.0
- id: 0.0
- mult: 64.0
- od: 0.7
- clad: &component_fuel_clad
- shape: Circle
- material: HT9
- Tinput: 25.0
- Thot: 470.0
- id: 1.0
- mult: fuel.mult
- od: 1.15
- bond: &component_fuel_bond
- shape: Circle
- material: Sodium
- Tinput: 450.0
- Thot: 450.0
- id: fuel.od
- mult: fuel.mult
- od: clad.id
- wire: &component_fuel_wire
- shape: Helix
- material: HT9
- Tinput: 25.0
- Thot: 450.0
- axialPitch: 30.15
- helixDiameter: 1.2
- id: 0.0
- mult: fuel.mult
- od: 0.100
- coolant: &component_fuel_coolant
- shape: DerivedShape
- material: Sodium
- Tinput: 450.0
- Thot: 450.0
- duct: &component_fuel_duct
- shape: Rectangle
- material: HT9
- Tinput: 25.0
- Thot: 450.0
- lengthInner: 9.0
- lengthOuter: 9.5
- mult: 1.0
- widthInner: 9.0
- widthOuter: 9.5
- intercoolant: &component_fuel_intercoolant
- shape: Rectangle
- material: Sodium
- Tinput: 450.0
- Thot: 450.0
- lengthInner: 9.5
- lengthOuter: 10.0
- mult: 1.0
- widthInner: 9.5
- widthOuter: 10.0
- control: &block_control
- control:
- shape: Circle
- material: B4C
- Tinput: 600.0
- Thot: 600.0
- id: 0.0
- mult: 25.0
- od: 1.3
- innerduct:
- shape: Rectangle
- material: HT9
- Tinput: 450.0
- Thot: 450.0
- lengthInner: 8.0
- lengthOuter: 8.5
- mult: 1.0
- widthInner: 8.0
- widthOuter: 8.5
- duct:
- shape: Rectangle
- material: HT9
- Tinput: 450.0
- Thot: 450.0
- lengthInner: 8.7
- lengthOuter: 9.0
- mult: 1.0
- widthInner: 8.7
- widthOuter: 9.0
- clad:
- shape: Circle
- material: HT9
- Tinput: 450.0
- Thot: 450.0
- id: 1.35
- mult: control.mult
- od: 1.7
- wire:
- shape: Helix
- material: HT9
- Tinput: 450.0
- Thot: 450.0
- axialPitch: 50.0
- helixDiameter: 1.7
- id: 0.0
- mult: control.mult
- od: 0.085
- intercoolant: *component_fuel_intercoolant
- gap:
- shape: Circle
- material: Void
- Tinput: 450.0
- Thot: 450.0
- id: control.od
- mult: control.mult
- od: clad.id
- coolant: *component_fuel_coolant
- duct: &block_duct
- duct: &component_duct_duct
- shape: Rectangle
- material: HT9
- Tinput: 450.0
- Thot: 450.0
- lengthInner: 9.0
- lengthOuter: 9.5
- mult: 1.0
- widthInner: 9.0
- widthOuter: 9.5
- coolant: *component_fuel_coolant
- intercoolant: *component_fuel_intercoolant
- grid plate: &block_grid_plate
- grid:
- shape: Rectangle
- material: HT9
- Tinput: 450.0
- Thot: 450.0
- lengthInner: 0.0
- lengthOuter: 9.5
- mult: 1.0
- widthInner: 0.0
- widthOuter: 9.5
- coolant: *component_fuel_coolant
- intercoolant: *component_fuel_intercoolant
- axial shield: &block_axial_shield
- shield:
- shape: Circle
- material: HT9
- Tinput: 600.0
- Thot: 600.0
- id: 0.0
- mult: 64.0
- od: 0.90
- clad:
- shape: Circle
- material: HT9
- Tinput: 450.0
- Thot: 450.0
- id: 0.905
- mult: shield.mult
- od: 1.050
- gap:
- shape: Circle
- material: Void
- Tinput: 450.0
- Thot: 450.0
- id: shield.od
- mult: shield.mult
- od: clad.id
- duct: *component_duct_duct
- intercoolant: *component_fuel_intercoolant
- coolant: *component_fuel_coolant
- wire:
- shape: Helix
- material: HT9
- Tinput: 450.0
- Thot: 450.0
- axialPitch: 30.15
- helixDiameter: 10.10
- id: 0.0
- mult: shield.mult
- od: 0.100
- plenum: &block_plenum
- clad:
- shape: Circle
- material: HT9
- Tinput: 25.0
- Thot: 470.0
- id: 1.0
- mult: 64.0
- od: 1.09
- gap:
- shape: Circle
- material: Void
- Tinput: 25.0
- Thot: 600.0
- id: 0.0
- mult: clad.mult
- od: clad.id
- wire:
- shape: Helix
- material: HT9
- Tinput: 25.0
- Thot: 450.0
- axialPitch: 30.
- helixDiameter: 1.2
- id: 0.0
- mult: clad.mult
- od: 0.1
- coolant: *component_fuel_coolant
- duct: *component_fuel_duct
- intercoolant: *component_fuel_intercoolant
- fuel2: &block_fuel2
- fuel:
- shape: Circle
- material: Custom
- Tinput: 25.0
- Thot: 600.0
- id: 0.0
- isotopics: MOX
- mult: 64.0
- od: 0.87
- clad: *component_fuel_clad
- bond: *component_fuel_bond
- wire: *component_fuel_wire
- coolant: *component_fuel_coolant
- duct: *component_fuel_duct
- intercoolant: *component_fuel_intercoolant
+ fuel: &block_fuel
+ fuel:
+ shape: Circle
+ material: UZr
+ Tinput: 25.0
+ Thot: 600.0
+ id: 0.0
+ mult: 64.0
+ od: 0.7
+ clad: &component_fuel_clad
+ shape: Circle
+ material: HT9
+ Tinput: 25.0
+ Thot: 470.0
+ id: 1.0
+ mult: fuel.mult
+ od: 1.15
+ bond: &component_fuel_bond
+ shape: Circle
+ material: Sodium
+ Tinput: 450.0
+ Thot: 450.0
+ id: fuel.od
+ mult: fuel.mult
+ od: clad.id
+ wire: &component_fuel_wire
+ shape: Helix
+ material: HT9
+ Tinput: 25.0
+ Thot: 450.0
+ axialPitch: 30.15
+ helixDiameter: 1.2
+ id: 0.0
+ mult: fuel.mult
+ od: 0.100
+ coolant: &component_fuel_coolant
+ shape: DerivedShape
+ material: Sodium
+ Tinput: 450.0
+ Thot: 450.0
+ duct: &component_fuel_duct
+ shape: Rectangle
+ material: HT9
+ Tinput: 25.0
+ Thot: 450.0
+ lengthInner: 9.0
+ lengthOuter: 9.5
+ mult: 1.0
+ widthInner: 9.0
+ widthOuter: 9.5
+ intercoolant: &component_fuel_intercoolant
+ shape: Rectangle
+ material: Sodium
+ Tinput: 450.0
+ Thot: 450.0
+ lengthInner: 9.5
+ lengthOuter: 10.0
+ mult: 1.0
+ widthInner: 9.5
+ widthOuter: 10.0
+ control: &block_control
+ control:
+ shape: Circle
+ material: B4C
+ Tinput: 600.0
+ Thot: 600.0
+ id: 0.0
+ mult: 25.0
+ od: 1.3
+ innerduct:
+ shape: Rectangle
+ material: HT9
+ Tinput: 450.0
+ Thot: 450.0
+ lengthInner: 8.0
+ lengthOuter: 8.5
+ mult: 1.0
+ widthInner: 8.0
+ widthOuter: 8.5
+ duct:
+ shape: Rectangle
+ material: HT9
+ Tinput: 450.0
+ Thot: 450.0
+ lengthInner: 8.7
+ lengthOuter: 9.0
+ mult: 1.0
+ widthInner: 8.7
+ widthOuter: 9.0
+ clad:
+ shape: Circle
+ material: HT9
+ Tinput: 450.0
+ Thot: 450.0
+ id: 1.35
+ mult: control.mult
+ od: 1.7
+ wire:
+ shape: Helix
+ material: HT9
+ Tinput: 450.0
+ Thot: 450.0
+ axialPitch: 50.0
+ helixDiameter: 1.7
+ id: 0.0
+ mult: control.mult
+ od: 0.085
+ intercoolant: *component_fuel_intercoolant
+ gap:
+ shape: Circle
+ material: Void
+ Tinput: 450.0
+ Thot: 450.0
+ id: control.od
+ mult: control.mult
+ od: clad.id
+ coolant: *component_fuel_coolant
+ duct: &block_duct
+ duct: &component_duct_duct
+ shape: Rectangle
+ material: HT9
+ Tinput: 450.0
+ Thot: 450.0
+ lengthInner: 9.0
+ lengthOuter: 9.5
+ mult: 1.0
+ widthInner: 9.0
+ widthOuter: 9.5
+ coolant: *component_fuel_coolant
+ intercoolant: *component_fuel_intercoolant
+ grid plate: &block_grid_plate
+ grid:
+ shape: Rectangle
+ material: HT9
+ Tinput: 450.0
+ Thot: 450.0
+ lengthInner: 0.0
+ lengthOuter: 9.5
+ mult: 1.0
+ widthInner: 0.0
+ widthOuter: 9.5
+ coolant: *component_fuel_coolant
+ intercoolant: *component_fuel_intercoolant
+ axial shield: &block_axial_shield
+ shield:
+ shape: Circle
+ material: HT9
+ Tinput: 600.0
+ Thot: 600.0
+ id: 0.0
+ mult: 64.0
+ od: 0.90
+ clad:
+ shape: Circle
+ material: HT9
+ Tinput: 450.0
+ Thot: 450.0
+ id: 0.905
+ mult: shield.mult
+ od: 1.050
+ gap:
+ shape: Circle
+ material: Void
+ Tinput: 450.0
+ Thot: 450.0
+ id: shield.od
+ mult: shield.mult
+ od: clad.id
+ duct: *component_duct_duct
+ intercoolant: *component_fuel_intercoolant
+ coolant: *component_fuel_coolant
+ wire:
+ shape: Helix
+ material: HT9
+ Tinput: 450.0
+ Thot: 450.0
+ axialPitch: 30.15
+ helixDiameter: 10.10
+ id: 0.0
+ mult: shield.mult
+ od: 0.100
+ plenum: &block_plenum
+ clad:
+ shape: Circle
+ material: HT9
+ Tinput: 25.0
+ Thot: 470.0
+ id: 1.0
+ mult: 64.0
+ od: 1.09
+ gap:
+ shape: Circle
+ material: Void
+ Tinput: 25.0
+ Thot: 600.0
+ id: 0.0
+ mult: clad.mult
+ od: clad.id
+ wire:
+ shape: Helix
+ material: HT9
+ Tinput: 25.0
+ Thot: 450.0
+ axialPitch: 30.
+ helixDiameter: 1.2
+ id: 0.0
+ mult: clad.mult
+ od: 0.1
+ coolant: *component_fuel_coolant
+ duct: *component_fuel_duct
+ intercoolant: *component_fuel_intercoolant
+ fuel2: &block_fuel2
+ fuel:
+ shape: Circle
+ material: Custom
+ Tinput: 25.0
+ Thot: 600.0
+ id: 0.0
+ isotopics: MOX
+ mult: 64.0
+ od: 0.87
+ clad: *component_fuel_clad
+ bond: *component_fuel_bond
+ wire: *component_fuel_wire
+ coolant: *component_fuel_coolant
+ duct: *component_fuel_duct
+ intercoolant: *component_fuel_intercoolant
assemblies:
- heights: &standard_heights [25.0, 25.0, 25.0, 25.0, 75.0]
- axial mesh points: &standard_axial_mesh_points [1, 1, 1, 1, 4]
- igniter fuel:
- specifier: IC
- blocks: &igniter_fuel_blocks [*block_grid_plate, *block_fuel, *block_fuel, *block_fuel, *block_plenum]
- height: *standard_heights
- axial mesh points: *standard_axial_mesh_points
- hotChannelFactors: TWRPclad
- material modifications:
- U235_wt_frac: &igniter_fuel_u235_wt_frac ['', 0.11, 0.11, 0.11, '']
- ZR_wt_frac: &igniter_fuel_zr_wt_frac ['', 0.06, 0.06, 0.06, '']
- xs types: &igniter_fuel_xs_types [A, A, A, A, A]
- middle fuel:
- specifier: MC
- blocks: [*block_grid_plate, *block_fuel2, *block_fuel2, *block_fuel2, *block_plenum]
- height: *standard_heights
- axial mesh points: *standard_axial_mesh_points
- xs types: *igniter_fuel_xs_types
- feed fuel:
- specifier: OC
- blocks: *igniter_fuel_blocks
- height: *standard_heights
- axial mesh points: *standard_axial_mesh_points
- material modifications:
- U235_wt_frac: *igniter_fuel_u235_wt_frac
- ZR_wt_frac: *igniter_fuel_zr_wt_frac
- xs types: *igniter_fuel_xs_types
- primary control:
- specifier: PC
- blocks: [*block_grid_plate, *block_duct, *block_duct, *block_control, *block_plenum]
- height: *standard_heights
- axial mesh points: *standard_axial_mesh_points
- xs types: *igniter_fuel_xs_types
- radial shield:
- specifier: SH
- blocks: [*block_grid_plate, *block_axial_shield, *block_axial_shield, *block_axial_shield, *block_plenum]
- height: *standard_heights
- axial mesh points: *standard_axial_mesh_points
- xs types: *igniter_fuel_xs_types
+ heights: &standard_heights [25.0, 25.0, 25.0, 25.0, 75.0]
+ axial mesh points: &standard_axial_mesh_points [1, 1, 1, 1, 4]
+ igniter fuel:
+ specifier: IC
+ blocks:
+ &igniter_fuel_blocks [
+ *block_grid_plate,
+ *block_fuel,
+ *block_fuel,
+ *block_fuel,
+ *block_plenum,
+ ]
+ height: *standard_heights
+ axial mesh points: *standard_axial_mesh_points
+ hotChannelFactors: TWRPclad
+ material modifications:
+ U235_wt_frac: &igniter_fuel_u235_wt_frac ["", 0.11, 0.11, 0.11, ""]
+ ZR_wt_frac: &igniter_fuel_zr_wt_frac ["", 0.06, 0.06, 0.06, ""]
+ xs types: &igniter_fuel_xs_types [A, A, A, A, A]
+ middle fuel:
+ specifier: MC
+ blocks:
+ [
+ *block_grid_plate,
+ *block_fuel2,
+ *block_fuel2,
+ *block_fuel2,
+ *block_plenum,
+ ]
+ height: *standard_heights
+ axial mesh points: *standard_axial_mesh_points
+ xs types: *igniter_fuel_xs_types
+ feed fuel:
+ specifier: OC
+ blocks: *igniter_fuel_blocks
+ height: *standard_heights
+ axial mesh points: *standard_axial_mesh_points
+ material modifications:
+ U235_wt_frac: *igniter_fuel_u235_wt_frac
+ ZR_wt_frac: *igniter_fuel_zr_wt_frac
+ xs types: *igniter_fuel_xs_types
+ primary control:
+ specifier: PC
+ blocks:
+ [
+ *block_grid_plate,
+ *block_duct,
+ *block_duct,
+ *block_control,
+ *block_plenum,
+ ]
+ height: *standard_heights
+ axial mesh points: *standard_axial_mesh_points
+ xs types: *igniter_fuel_xs_types
+ radial shield:
+ specifier: SH
+ blocks:
+ [
+ *block_grid_plate,
+ *block_axial_shield,
+ *block_axial_shield,
+ *block_axial_shield,
+ *block_plenum,
+ ]
+ height: *standard_heights
+ axial mesh points: *standard_axial_mesh_points
+ xs types: *igniter_fuel_xs_types
systems:
- core:
- grid name: core
- origin:
- x: 0.0
- y: 0.0
- z: 0.0
+ core:
+ grid name: core
+ origin:
+ x: 0.0
+ y: 0.0
+ z: 0.0
grids:
- core:
- geom: cartesian
- symmetry: full
- lattice pitch:
- x: 10.0
- y: 10.0
- lattice map: |
- SH SH SH SH SH SH SH SH SH SH SH SH SH
- SH OC OC OC OC OC OC OC OC OC OC OC SH
- SH OC MC MC MC MC MC MC MC MC MC OC SH
- SH OC MC IC IC IC IC IC IC IC MC OC SH
- SH OC MC IC IC IC IC IC IC IC MC OC SH
- SH OC MC IC IC IC IC IC IC IC MC OC SH
- SH OC MC IC IC IC IC IC IC IC MC OC SH
- SH OC MC IC IC IC IC IC IC IC MC OC SH
- SH OC MC IC IC IC IC IC IC IC MC OC SH
- SH OC MC IC IC IC IC IC IC IC MC OC SH
- SH OC MC MC MC MC MC MC MC MC MC OC SH
- SH OC OC OC OC OC OC OC OC OC OC OC SH
- SH SH SH SH SH SH SH SH SH SH SH SH SH
\ No newline at end of file
+ core:
+ geom: cartesian
+ symmetry: full
+ lattice pitch:
+ x: 10.0
+ y: 10.0
+ lattice map: |
+ SH SH SH SH SH SH SH SH SH SH SH SH SH
+ SH OC OC OC OC OC OC OC OC OC OC OC SH
+ SH OC MC MC MC MC MC MC MC MC MC OC SH
+ SH OC MC IC IC IC IC IC IC IC MC OC SH
+ SH OC MC IC IC IC IC IC IC IC MC OC SH
+ SH OC MC IC IC IC IC IC IC IC MC OC SH
+ SH OC MC IC IC IC IC IC IC IC MC OC SH
+ SH OC MC IC IC IC IC IC IC IC MC OC SH
+ SH OC MC IC IC IC IC IC IC IC MC OC SH
+ SH OC MC IC IC IC IC IC IC IC MC OC SH
+ SH OC MC MC MC MC MC MC MC MC MC OC SH
+ SH OC OC OC OC OC OC OC OC OC OC OC SH
+ SH SH SH SH SH SH SH SH SH SH SH SH SH
diff --git a/armi/tests/refSmallReactorBase.yaml b/armi/tests/refSmallReactorBase.yaml
index 963c63b1f..14ad2e062 100644
--- a/armi/tests/refSmallReactorBase.yaml
+++ b/armi/tests/refSmallReactorBase.yaml
@@ -1,436 +1,483 @@
custom isotopics:
- MOX:
- input format: number densities
- AM241: 2.3606e-05
- PU238: 3.7387e-06
- PU239: 0.00286038
- PU240: 0.000712945
- PU241: 9.82312e-05
- PU242: 2.02221e-05
- U235: 0.00405533
- U238: 0.0134125
- PuUZr:
- input format: mass fractions
- density: 9.491820414019937
- PU239: 0.1
- U235: 0.15
- U238: 0.65
- ZR: 0.1
+ MOX:
+ input format: number densities
+ AM241: 2.3606e-05
+ PU238: 3.7387e-06
+ PU239: 0.00286038
+ PU240: 0.000712945
+ PU241: 9.82312e-05
+ PU242: 2.02221e-05
+ U235: 0.00405533
+ U238: 0.0134125
+ PuUZr:
+ input format: mass fractions
+ PU239: 0.1
+ U235: 0.15
+ U238: 0.65
+ ZR: 0.1
blocks:
- fuel: &block_fuel
- fuel: &component_fuel_fuel
- shape: Circle
- material: UZr
- Tinput: 25.0
- Thot: 600.0
- id: 0.0
- mult: 169.0
- od: 0.86602
- clad: &component_fuel_clad
- shape: Circle
- material: HT9
- Tinput: 25.0
- Thot: 470.0
- id: 1.0
- mult: fuel.mult
- od: 1.09
- bond: &component_fuel_bond
- shape: Circle
- material: Sodium
- Tinput: 450.0
- Thot: 450.0
- id: fuel.od
- mult: fuel.mult
- od: clad.id
- wire: &component_fuel_wire
- shape: Helix
- material: HT9
- Tinput: 25.0
- Thot: 450.0
- axialPitch: 30.15
- helixDiameter: 1.19056
- id: 0.0
- mult: fuel.mult
- od: 0.10056
- coolant: &component_fuel_coolant
- shape: DerivedShape
- material: Sodium
- Tinput: 450.0
- Thot: 450.0
- duct: &component_fuel_duct
- shape: Hexagon
- material: HT9
- Tinput: 25.0
- Thot: 450.0
- ip: 16.0
- mult: 1.0
- op: 16.6
- intercoolant: &component_fuel_intercoolant
- shape: Hexagon
- material: Sodium
- Tinput: 450.0
- Thot: 450.0
- ip: duct.op
- mult: 1.0
- op: 16.75
- moveable control: &block_control
- control:
- shape: Circle
- material: B4C
- Tinput: 600.0
- Thot: 600.0
- id: 0.0
- mult: 61.0
- od: 1.286
- innerDuct:
- shape: Hexagon
- material: HT9
- Tinput: 450.0
- Thot: 450.0
- ip: 14.268
- mult: 1.0
- op: 14.582
- duct: &component_control_duct
- shape: Hexagon
- material: HT9
- Tinput: 450.0
- Thot: 450.0
- ip: 15.277
- mult: 1.0
- op: 16.28228
- clad:
- shape: Circle
- material: HT9
- Tinput: 450.0
- Thot: 450.0
- id: 1.358
- mult: control.mult
- od: 1.686
- wire:
- shape: Helix
- material: HT9
- Tinput: 450.0
- Thot: 450.0
- axialPitch: 50.0
- helixDiameter: 1.771
- id: 0.0
- mult: control.mult
- od: 0.085
- intercoolant: *component_fuel_intercoolant
- gap:
- shape: Circle
- material: Void
- Tinput: 450.0
- Thot: 450.0
- id: control.od
- mult: control.mult
- od: clad.id
- coolant: *component_fuel_coolant
- duct: &block_duct
- duct: *component_control_duct
- coolant: *component_fuel_coolant
- intercoolant: *component_fuel_intercoolant
- grid plate: &block_grid_plate
- grid: &component_grid_plate_grid
- shape: Hexagon
- material: HT9
- Tinput: 450.0
- Thot: 450.0
- ip: 15.277
- mult: 1.0
- op: 16.577
- coolant: *component_fuel_coolant
- intercoolant:
- shape: Hexagon
- material: Sodium
- Tinput: 450.0
- Thot: 450.0
- ip: grid.op
- mult: 1.0
- op: 16.75
- grid plate broken:
- grid: *component_grid_plate_grid
- coolant: *component_fuel_coolant
- intercoolant:
- shape: Hexagon
- material: Sodium
- Tinput: 450.0
- Thot: 450.0
- ip: grid.op
- mult: 1.0
- op: 0.0
- axial shield: &block_axial_shield
- shield:
- shape: Circle
- material: HT9
- Tinput: 600.0
- Thot: 600.0
- id: 0.0
- mult: 169.0
- od: 0.90362
- clad:
- shape: Circle
- material: HT9
- Tinput: 450.0
- Thot: 450.0
- id: 0.90562
- mult: shield.mult
- od: 1.05036
- gap:
- shape: Circle
- material: Void
- Tinput: 450.0
- Thot: 450.0
- id: shield.od
- mult: shield.mult
- od: clad.id
- duct: *component_control_duct
- intercoolant: *component_fuel_intercoolant
- coolant: *component_fuel_coolant
- wire:
- shape: Helix
- material: HT9
- Tinput: 450.0
- Thot: 450.0
- axialPitch: 30.15
- helixDiameter: 16.85056
- id: 0.0
- mult: shield.mult
- od: 0.10056
- moveable plenum: &block_plenum
- clad:
- shape: Circle
- material: HT9
- Tinput: 25.0
- Thot: 470.0
- id: 1.0
- mult: 169.0
- od: 1.09
- gap:
- shape: Circle
- material: Void
- Tinput: 25.0
- Thot: 600.0
- id: 0.0
- mult: clad.mult
- od: clad.id
- wire:
- shape: Helix
- material: HT9
- Tinput: 25.0
- Thot: 450.0
- axialPitch: 30.15
- helixDiameter: 1.19056
- id: 0.0
- mult: clad.mult
- od: 0.10056
- coolant: *component_fuel_coolant
- duct: *component_fuel_duct
- intercoolant: *component_fuel_intercoolant
- fuel2: &block_fuel2
- fuel:
- shape: Circle
- material: Custom
- Tinput: 25.0
- Thot: 600.0
- id: 0.0
- isotopics: MOX
- mult: 169.0
- od: 0.86602
- bond: &component_fuel_bond2
- shape: Circle
- material: Sodium
- Tinput: 450.0
- Thot: 450.0
- id: fuel.od
- mult: fuel.mult
- od: liner1.id
- clad: *component_fuel_clad
- liner1: &component_fuel2_liner1
- shape: Circle
- material: HT9
- Tinput: 25.0
- Thot: 600.0
- id: 0.99
- mergeWith: clad
- mult: 169.0
- od: 1.0
- liner2: &component_fuel2_liner2
- shape: Circle
- material: HT9
- Tinput: 25.0
- Thot: 600.0
- id: 0.98
- mergeWith: clad
- mult: 169.0
- od: 0.99
- wire: *component_fuel_wire
- coolant: *component_fuel_coolant
- duct: *component_fuel_duct
- intercoolant: *component_fuel_intercoolant
- lta1 fuel: &block_lta1_fuel
- fuel: *component_fuel_fuel
- clad: *component_fuel_clad
- liner1: *component_fuel2_liner1
- liner2: *component_fuel2_liner2
- bond: *component_fuel_bond
- wire: *component_fuel_wire
- coolant: *component_fuel_coolant
- duct: *component_fuel_duct
- intercoolant: *component_fuel_intercoolant
- lta2 fuel: &block_lta2_fuel
- fuel:
- shape: Circle
- material: UZr
- Tinput: 25.0
- Thot: 600.0
- id: 0.0
- isotopics: PuUZr
- mult: 169.0
- od: 0.86602
- clad: *component_fuel_clad
- liner1: *component_fuel2_liner1
- liner2: *component_fuel2_liner2
- bond: *component_fuel_bond
- wire: *component_fuel_wire
- coolant: *component_fuel_coolant
- duct: *component_fuel_duct
- intercoolant: *component_fuel_intercoolant
- annular fuel gap: &block_fuel3
- gap1:
- shape: Circle
- material: Void
- Tinput: 20.0
- Thot: 430.0
- id: 0.0
- mult: fuel.mult
- od: fuel.id
- fuel:
- shape: Circle
- material: UZr
- Tinput: 20.0
- Thot: 600.0
- id: 0.600
- mult: 169.0
- od: 0.878
- flags: annular fuel depletable
- gap2:
- shape: Circle
- material: Void
- Tinput: 20.0
- Thot: 430.0
- id: fuel.od
- mult: fuel.mult
- od: inner liner.id
- inner liner:
- shape: Circle
- material: HT9
- Tinput: 20.0
- Thot: 430.0
- id: 0.878
- mult: fuel.mult
- od: 0.898
- gap3:
- shape: Circle
- material: Void
- Tinput: 20.0
- Thot: 430.0
- id: inner liner.od
- mult: fuel.mult
- od: outer liner.id
- outer liner:
- shape: Circle
- material: Zr
- Tinput: 20.0
- Thot: 430.0
- id: 0.898
- mult: fuel.mult
- od: 0.900
- gap4:
- shape: Circle
- material: Void
- Tinput: 20.0
- Thot: 430.0
- id: outer liner.od
- mult: fuel.mult
- od: clad.id
- clad:
- shape: Circle
- material: HT9
- Tinput: 20.0
- Thot: 430.0
- id: 0.900
- mult: fuel.mult
- od: 1.000
- wire: *component_fuel_wire
- coolant: *component_fuel_coolant
- duct: *component_fuel_duct
- intercoolant: *component_fuel_intercoolant
+ fuel: &block_fuel
+ fuel: &component_fuel_fuel
+ shape: Circle
+ material: UZr
+ Tinput: 25.0
+ Thot: 600.0
+ id: 0.0
+ mult: 169.0
+ od: 0.86602
+ clad: &component_fuel_clad
+ shape: Circle
+ material: HT9
+ Tinput: 25.0
+ Thot: 470.0
+ id: 1.0
+ mult: fuel.mult
+ od: 1.09
+ bond: &component_fuel_bond
+ shape: Circle
+ material: Sodium
+ Tinput: 450.0
+ Thot: 450.0
+ id: fuel.od
+ mult: fuel.mult
+ od: clad.id
+ wire: &component_fuel_wire
+ shape: Helix
+ material: HT9
+ Tinput: 25.0
+ Thot: 450.0
+ axialPitch: 30.15
+ helixDiameter: 1.19056
+ id: 0.0
+ mult: fuel.mult
+ od: 0.10056
+ coolant: &component_fuel_coolant
+ shape: DerivedShape
+ material: Sodium
+ Tinput: 450.0
+ Thot: 450.0
+ duct: &component_fuel_duct
+ shape: Hexagon
+ material: HT9
+ Tinput: 25.0
+ Thot: 450.0
+ ip: 16.0
+ mult: 1.0
+ op: 16.6
+ intercoolant: &component_fuel_intercoolant
+ shape: Hexagon
+ material: Sodium
+ Tinput: 450.0
+ Thot: 450.0
+ ip: duct.op
+ mult: 1.0
+ op: 16.75
+ moveable control: &block_control
+ control:
+ shape: Circle
+ material: B4C
+ Tinput: 600.0
+ Thot: 600.0
+ id: 0.0
+ mult: 61.0
+ od: 1.286
+ innerDuct:
+ shape: Hexagon
+ material: HT9
+ Tinput: 450.0
+ Thot: 450.0
+ ip: 14.268
+ mult: 1.0
+ op: 14.582
+ duct: &component_control_duct
+ shape: Hexagon
+ material: HT9
+ Tinput: 450.0
+ Thot: 450.0
+ ip: 15.277
+ mult: 1.0
+ op: 16.28228
+ clad:
+ shape: Circle
+ material: HT9
+ Tinput: 450.0
+ Thot: 450.0
+ id: 1.358
+ mult: control.mult
+ od: 1.686
+ wire:
+ shape: Helix
+ material: HT9
+ Tinput: 450.0
+ Thot: 450.0
+ axialPitch: 50.0
+ helixDiameter: 1.771
+ id: 0.0
+ mult: control.mult
+ od: 0.085
+ intercoolant: *component_fuel_intercoolant
+ gap:
+ shape: Circle
+ material: Void
+ Tinput: 450.0
+ Thot: 450.0
+ id: control.od
+ mult: control.mult
+ od: clad.id
+ coolant: *component_fuel_coolant
+ duct: &block_duct
+ duct: *component_control_duct
+ coolant: *component_fuel_coolant
+ intercoolant: *component_fuel_intercoolant
+ grid plate: &block_grid_plate
+ grid: &component_grid_plate_grid
+ shape: Hexagon
+ material: HT9
+ Tinput: 450.0
+ Thot: 450.0
+ ip: 15.277
+ mult: 1.0
+ op: 16.577
+ coolant: *component_fuel_coolant
+ intercoolant:
+ shape: Hexagon
+ material: Sodium
+ Tinput: 450.0
+ Thot: 450.0
+ ip: grid.op
+ mult: 1.0
+ op: 16.75
+ grid plate broken:
+ grid: *component_grid_plate_grid
+ coolant: *component_fuel_coolant
+ intercoolant:
+ shape: Hexagon
+ material: Sodium
+ Tinput: 450.0
+ Thot: 450.0
+ ip: grid.op
+ mult: 1.0
+ op: 0.0
+ axial shield: &block_axial_shield
+ shield:
+ shape: Circle
+ material: HT9
+ Tinput: 600.0
+ Thot: 600.0
+ id: 0.0
+ mult: 169.0
+ od: 0.90362
+ clad:
+ shape: Circle
+ material: HT9
+ Tinput: 450.0
+ Thot: 450.0
+ id: 0.90562
+ mult: shield.mult
+ od: 1.05036
+ gap:
+ shape: Circle
+ material: Void
+ Tinput: 450.0
+ Thot: 450.0
+ id: shield.od
+ mult: shield.mult
+ od: clad.id
+ duct: *component_control_duct
+ intercoolant: *component_fuel_intercoolant
+ coolant: *component_fuel_coolant
+ wire:
+ shape: Helix
+ material: HT9
+ Tinput: 450.0
+ Thot: 450.0
+ axialPitch: 30.15
+ helixDiameter: 16.85056
+ id: 0.0
+ mult: shield.mult
+ od: 0.10056
+ moveable plenum: &block_plenum
+ clad:
+ shape: Circle
+ material: HT9
+ Tinput: 25.0
+ Thot: 470.0
+ id: 1.0
+ mult: 169.0
+ od: 1.09
+ gap:
+ shape: Circle
+ material: Void
+ Tinput: 25.0
+ Thot: 600.0
+ id: 0.0
+ mult: clad.mult
+ od: clad.id
+ wire:
+ shape: Helix
+ material: HT9
+ Tinput: 25.0
+ Thot: 450.0
+ axialPitch: 30.15
+ helixDiameter: 1.19056
+ id: 0.0
+ mult: clad.mult
+ od: 0.10056
+ coolant: *component_fuel_coolant
+ duct: *component_fuel_duct
+ intercoolant: *component_fuel_intercoolant
+ fuel2: &block_fuel2
+ fuel:
+ shape: Circle
+ material: Custom
+ Tinput: 25.0
+ Thot: 600.0
+ id: 0.0
+ isotopics: MOX
+ mult: 169.0
+ od: 0.86602
+ bond: &component_fuel_bond2
+ shape: Circle
+ material: Sodium
+ Tinput: 450.0
+ Thot: 450.0
+ id: fuel.od
+ mult: fuel.mult
+ od: liner1.id
+ clad: *component_fuel_clad
+ liner1: &component_fuel2_liner1
+ shape: Circle
+ material: HT9
+ Tinput: 25.0
+ Thot: 600.0
+ id: 0.99
+ mergeWith: clad
+ mult: 169.0
+ od: 1.0
+ liner2: &component_fuel2_liner2
+ shape: Circle
+ material: HT9
+ Tinput: 25.0
+ Thot: 600.0
+ id: 0.98
+ mergeWith: clad
+ mult: 169.0
+ od: 0.99
+ wire: *component_fuel_wire
+ coolant: *component_fuel_coolant
+ duct: *component_fuel_duct
+ intercoolant: *component_fuel_intercoolant
+ lta1 fuel: &block_lta1_fuel
+ fuel: *component_fuel_fuel
+ clad: *component_fuel_clad
+ liner1: *component_fuel2_liner1
+ liner2: *component_fuel2_liner2
+ bond: *component_fuel_bond
+ wire: *component_fuel_wire
+ coolant: *component_fuel_coolant
+ duct: *component_fuel_duct
+ intercoolant: *component_fuel_intercoolant
+ lta2 fuel: &block_lta2_fuel
+ fuel:
+ shape: Circle
+ material: UZr
+ Tinput: 25.0
+ Thot: 600.0
+ id: 0.0
+ isotopics: PuUZr
+ mult: 169.0
+ od: 0.86602
+ clad: *component_fuel_clad
+ liner1: *component_fuel2_liner1
+ liner2: *component_fuel2_liner2
+ bond: *component_fuel_bond
+ wire: *component_fuel_wire
+ coolant: *component_fuel_coolant
+ duct: *component_fuel_duct
+ intercoolant: *component_fuel_intercoolant
+ annular fuel gap: &block_fuel3
+ gap1:
+ shape: Circle
+ material: Void
+ Tinput: 20.0
+ Thot: 430.0
+ id: 0.0
+ mult: fuel.mult
+ od: fuel.id
+ fuel:
+ shape: Circle
+ material: UZr
+ Tinput: 20.0
+ Thot: 600.0
+ id: 0.600
+ mult: 169.0
+ od: 0.878
+ flags: annular fuel depletable
+ gap2:
+ shape: Circle
+ material: Void
+ Tinput: 20.0
+ Thot: 430.0
+ id: fuel.od
+ mult: fuel.mult
+ od: inner liner.id
+ inner liner:
+ shape: Circle
+ material: HT9
+ Tinput: 20.0
+ Thot: 430.0
+ id: 0.878
+ mult: fuel.mult
+ od: 0.898
+ gap3:
+ shape: Circle
+ material: Void
+ Tinput: 20.0
+ Thot: 430.0
+ id: inner liner.od
+ mult: fuel.mult
+ od: outer liner.id
+ outer liner:
+ shape: Circle
+ material: Zr
+ Tinput: 20.0
+ Thot: 430.0
+ id: 0.898
+ mult: fuel.mult
+ od: 0.900
+ gap4:
+ shape: Circle
+ material: Void
+ Tinput: 20.0
+ Thot: 430.0
+ id: outer liner.od
+ mult: fuel.mult
+ od: clad.id
+ clad:
+ shape: Circle
+ material: HT9
+ Tinput: 20.0
+ Thot: 430.0
+ id: 0.900
+ mult: fuel.mult
+ od: 1.000
+ wire: *component_fuel_wire
+ coolant: *component_fuel_coolant
+ duct: *component_fuel_duct
+ intercoolant: *component_fuel_intercoolant
assemblies:
- heights: &standard_heights [25.0, 25.0, 25.0, 25.0, 75.0]
- axial mesh points: &standard_axial_mesh_points [1, 1, 1, 1, 4]
- igniter fuel:
- specifier: IC
- blocks: &igniter_fuel_blocks [*block_grid_plate, *block_fuel, *block_fuel, *block_fuel, *block_plenum]
- height: *standard_heights
- axial mesh points: *standard_axial_mesh_points
- material modifications:
- U235_wt_frac: &igniter_fuel_u235_wt_frac ['', 0.11, 0.11, 0.11, '']
- ZR_wt_frac: &igniter_fuel_zr_wt_frac ['', 0.06, 0.06, 0.06, '']
- xs types: &igniter_fuel_xs_types [A, A, A, A, A]
- nozzleType: Inner
- middle fuel:
- specifier: MC
- blocks: [*block_grid_plate, *block_fuel2, *block_fuel2, *block_fuel2, *block_plenum]
- height: *standard_heights
- axial mesh points: *standard_axial_mesh_points
- xs types: &middle_fuel_xs_types [Z, Z, Z, Z, Z]
- annular fuel:
- specifier: AF
- blocks: [*block_grid_plate, *block_fuel3, *block_fuel3, *block_fuel3, *block_plenum]
- height: *standard_heights
- axial mesh points: *standard_axial_mesh_points
- xs types: *igniter_fuel_xs_types
- lta fuel:
- specifier: LA
- blocks: [*block_grid_plate, *block_lta1_fuel, *block_lta1_fuel, *block_lta1_fuel, *block_plenum]
- height: *standard_heights
- axial mesh points: *standard_axial_mesh_points
- material modifications:
- U235_wt_frac: &lta_fuel_u235_wt_frac ['', 0.2, 0.2, 0.2, '']
- ZR_wt_frac: &lta_fuel_zr_wt_frac ['', 0.07, 0.07, 0.06, '']
- xs types: *igniter_fuel_xs_types
- nozzleType: lta
- lta fuel b:
- specifier: LB
- blocks: [*block_grid_plate, *block_lta2_fuel, *block_lta2_fuel, *block_lta2_fuel, *block_plenum]
- height: *standard_heights
- axial mesh points: *standard_axial_mesh_points
- material modifications:
- U235_wt_frac: *lta_fuel_u235_wt_frac
- ZR_wt_frac: *lta_fuel_zr_wt_frac
- xs types: *igniter_fuel_xs_types
- nozzleType: lta
- feed fuel:
- specifier: OC
- blocks: *igniter_fuel_blocks
- height: *standard_heights
- axial mesh points: *standard_axial_mesh_points
- material modifications:
- U235_wt_frac: *igniter_fuel_u235_wt_frac
- ZR_wt_frac: *igniter_fuel_zr_wt_frac
- xs types: *igniter_fuel_xs_types
- nozzleType: Outer
- primary control:
- specifier: PC
- blocks: [*block_grid_plate, *block_duct, *block_control, *block_plenum, *block_duct]
- height: *standard_heights
- axial mesh points: *standard_axial_mesh_points
- xs types: *igniter_fuel_xs_types
- radial shield:
- specifier: SH
- blocks: [*block_grid_plate, *block_axial_shield, *block_axial_shield, *block_axial_shield, *block_plenum]
- height: *standard_heights
- axial mesh points: *standard_axial_mesh_points
- xs types: *igniter_fuel_xs_types
-
+ heights: &standard_heights [25.0, 25.0, 25.0, 25.0, 75.0]
+ axial mesh points: &standard_axial_mesh_points [1, 1, 1, 1, 4]
+ igniter fuel:
+ specifier: IC
+ blocks:
+ &igniter_fuel_blocks [
+ *block_grid_plate,
+ *block_fuel,
+ *block_fuel,
+ *block_fuel,
+ *block_plenum,
+ ]
+ height: *standard_heights
+ axial mesh points: *standard_axial_mesh_points
+ material modifications:
+ U235_wt_frac: &igniter_fuel_u235_wt_frac ["", 0.11, 0.11, 0.11, ""]
+ ZR_wt_frac: &igniter_fuel_zr_wt_frac ["", 0.06, 0.06, 0.06, ""]
+ xs types: &igniter_fuel_xs_types [A, A, A, A, A]
+ nozzleType: Inner
+ middle fuel:
+ specifier: MC
+ blocks:
+ [
+ *block_grid_plate,
+ *block_fuel2,
+ *block_fuel2,
+ *block_fuel2,
+ *block_plenum,
+ ]
+ height: *standard_heights
+ axial mesh points: *standard_axial_mesh_points
+ xs types: &middle_fuel_xs_types [Z, Z, Z, Z, Z]
+ annular fuel:
+ specifier: AF
+ blocks:
+ [
+ *block_grid_plate,
+ *block_fuel3,
+ *block_fuel3,
+ *block_fuel3,
+ *block_plenum,
+ ]
+ height: *standard_heights
+ axial mesh points: *standard_axial_mesh_points
+ xs types: *igniter_fuel_xs_types
+ lta fuel:
+ specifier: LA
+ blocks:
+ [
+ *block_grid_plate,
+ *block_lta1_fuel,
+ *block_lta1_fuel,
+ *block_lta1_fuel,
+ *block_plenum,
+ ]
+ height: *standard_heights
+ axial mesh points: *standard_axial_mesh_points
+ material modifications:
+ U235_wt_frac: &lta_fuel_u235_wt_frac ["", 0.2, 0.2, 0.2, ""]
+ ZR_wt_frac: &lta_fuel_zr_wt_frac ["", 0.07, 0.07, 0.06, ""]
+ xs types: *igniter_fuel_xs_types
+ nozzleType: lta
+ lta fuel b:
+ specifier: LB
+ blocks:
+ [
+ *block_grid_plate,
+ *block_lta2_fuel,
+ *block_lta2_fuel,
+ *block_lta2_fuel,
+ *block_plenum,
+ ]
+ height: *standard_heights
+ axial mesh points: *standard_axial_mesh_points
+ material modifications:
+ U235_wt_frac: *lta_fuel_u235_wt_frac
+ ZR_wt_frac: *lta_fuel_zr_wt_frac
+ xs types: *igniter_fuel_xs_types
+ nozzleType: lta
+ feed fuel:
+ specifier: OC
+ blocks: *igniter_fuel_blocks
+ height: *standard_heights
+ axial mesh points: *standard_axial_mesh_points
+ material modifications:
+ U235_wt_frac: *igniter_fuel_u235_wt_frac
+ ZR_wt_frac: *igniter_fuel_zr_wt_frac
+ xs types: *igniter_fuel_xs_types
+ nozzleType: Outer
+ primary control:
+ specifier: PC
+ blocks:
+ [
+ *block_grid_plate,
+ *block_duct,
+ *block_control,
+ *block_plenum,
+ *block_duct,
+ ]
+ height: *standard_heights
+ axial mesh points: *standard_axial_mesh_points
+ xs types: *igniter_fuel_xs_types
+ radial shield:
+ specifier: SH
+ blocks:
+ [
+ *block_grid_plate,
+ *block_axial_shield,
+ *block_axial_shield,
+ *block_axial_shield,
+ *block_plenum,
+ ]
+ height: *standard_heights
+ axial mesh points: *standard_axial_mesh_points
+ xs types: *igniter_fuel_xs_types
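These blueprint files lean heavily on YAML anchors and aliases: a value defined once with &name (for example &lta_fuel_u235_wt_frac above) is reused wherever *name appears, which is how the "lta fuel b" assembly shares the "lta fuel" material modifications. Below is a minimal sketch of the semantics, using PyYAML purely for illustration (ARMI parses blueprints with its own YAML stack, but anchor/alias resolution is standard YAML):

import yaml  # PyYAML, used here only to demonstrate standard anchor/alias behavior

doc = """
lta fuel:
  U235_wt_frac: &lta_fuel_u235_wt_frac ["", 0.2, 0.2, 0.2, ""]
lta fuel b:
  U235_wt_frac: *lta_fuel_u235_wt_frac
"""
data = yaml.safe_load(doc)
# The alias resolves to the very list defined at the anchor.
assert data["lta fuel b"]["U235_wt_frac"] == ["", 0.2, 0.2, 0.2, ""]
assert data["lta fuel b"]["U235_wt_frac"] is data["lta fuel"]["U235_wt_frac"]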
diff --git a/armi/tests/refSmallReactorShuffleLogic.py b/armi/tests/refSmallReactorShuffleLogic.py
index ca1e6e4ad..209aef8c3 100644
--- a/armi/tests/refSmallReactorShuffleLogic.py
+++ b/armi/tests/refSmallReactorShuffleLogic.py
@@ -46,7 +46,6 @@ def chooseSwaps(self, factorList):
def getFactorList(cycle, cs=None, fallBack=False):
-
# prefer to keep these 0 through 1 since this is what the branch search can do.
defaultFactorList = {}
factorSearchFlags = []
diff --git a/armi/tests/refSmallSfpGrid.yaml b/armi/tests/refSmallSfpGrid.yaml
index bcefb6c7e..9fa8c350e 100644
--- a/armi/tests/refSmallSfpGrid.yaml
+++ b/armi/tests/refSmallSfpGrid.yaml
@@ -1,11 +1,11 @@
sfp:
- symmetry: full
- geom: cartesian
- lattice pitch:
- x: 50.0
- y: 50.0
- grid contents:
- [0,0]: MC
- [1,0]: MC
- [0,1]: MC
- [1,1]: MC
+ symmetry: full
+ geom: cartesian
+ lattice pitch:
+ x: 50.0
+ y: 50.0
+ grid contents:
+ [0, 0]: MC
+ [1, 0]: MC
+ [0, 1]: MC
+ [1, 1]: MC
diff --git a/armi/tests/refTestCartesian.yaml b/armi/tests/refTestCartesian.yaml
index ef865625a..dc166ade1 100644
--- a/armi/tests/refTestCartesian.yaml
+++ b/armi/tests/refTestCartesian.yaml
@@ -1,32 +1,27 @@
settings:
+# global
beta: 0.003454
- BOL: true
buGroups:
- 100
burnSteps: 0
- clusterExclusive: false
comment: Full-core Cartesian input file with a 10x10 cm square pitch.
cycleLength: 2000.0
- economics: false
- epsBurnTime: 0.001
- epsFSAvg: 1e-06
- epsFSPoint: 1e-06
- eqRingSchedule:
- - 13
- - 1
freshFeedType: igniter fuel
- jumpRingNum: 9
loadingFile: refSmallCartesian.yaml
- startNode: 1
- loadPadElevation: 200.0
- max2SigmaCladIDT: 630.0
- maxFlowZones: 12
- maxRegionDensityIterations: 5
outputFileExtension: png
- percentNaReduction: 10.0
power: 400000000.0
- summarizeAssemDesign: false
+ startNode: 1
targetK: 1.002
- transientForSensitivity: ''
versions:
armi: uncontrolled
+
+# fuel cycle
+ jumpRingNum: 9
+
+# neutronics
+ epsFSAvg: 1e-06
+ epsFSPoint: 1e-06
+ loadPadElevation: 200.0
+
+# report
+ summarizeAssemDesign: false
diff --git a/armi/tests/smallestTestReactor/armiRunSmallest.yaml b/armi/tests/smallestTestReactor/armiRunSmallest.yaml
new file mode 100644
index 000000000..544bc7b18
--- /dev/null
+++ b/armi/tests/smallestTestReactor/armiRunSmallest.yaml
@@ -0,0 +1,73 @@
+# This is a non-physical test reactor, designed to speed up testing of code
+# that needs a full reactor object but is insensitive to its details.
+# It is a single-hex-assembly reactor with only one block.
+
+settings:
+# global
+ availabilityFactor: 1
+ beta: 0.003454
+ branchVerbosity: debug
+ buGroups:
+ - 100
+ burnSteps: 2
+ comment: Simple test input.
+ cycleLength: 2000.0
+ detailAssemLocationsBOL:
+ - 002-001
+ freshFeedType: igniter fuel
+ loadingFile: refSmallestReactor.yaml
+ moduleVerbosity:
+ armi.reactor.reactors: info
+ nCycles: 2
+ outputFileExtension: png
+ power: 1000000.0
+ smallRun: true
+ startCycle: 1
+ startNode: 2
+ targetK: 1.002
+ verbosity: extra
+ versions:
+ armi: uncontrolled
+
+# cross section
+ crossSectionControl:
+ DA:
+ geometry: 0D
+ blockRepresentation: Median
+ criticalBuckling: true
+ externalDriver: true
+ useHomogenizedBlockComposition: false
+ numInternalRings: 1
+ numExternalRings: 1
+ XA:
+ xsFileLocation:
+ - ISOXA
+ YA:
+ geometry: 0D
+ fluxFileLocation: rzmflxYA
+ ZA:
+ geometry: 1D cylinder
+ blockRepresentation: ComponentAverage1DCylinder
+ validBlockTypes:
+ - fuel
+ externalDriver: false
+ mergeIntoClad:
+ - gap
+ numInternalRings: 1
+ numExternalRings: 1
+
+# database
+ db: false
+
+# fuel cycle
+ fuelHandlerName: EquilibriumShuffler
+ jumpRingNum: 9
+
+# neutronics
+ epsFSAvg: 1e-06
+ epsFSPoint: 1e-06
+ loadPadElevation: 200.0
+
+# report
+ genReports: false
+ summarizeAssemDesign: false
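The point of this one-assembly input is cheap Reactor construction; the test updates later in this diff (test_mpiActions.py, test_mpiFeatures.py, test_plugins.py) all switch to it. A minimal usage sketch, matching the call signature those tests use:

import armi
from armi.reactor.tests import test_reactors

armi.configure()  # needed when running outside ARMI's pytest environment
o, r = test_reactors.loadTestReactor(
    inputFileName="smallestTestReactor/armiRunSmallest.yaml"
)
print(len(r.core))  # a single hex assembly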
diff --git a/armi/tests/refOneBlockReactor.yaml b/armi/tests/smallestTestReactor/refOneBlockReactor.yaml
similarity index 91%
rename from armi/tests/refOneBlockReactor.yaml
rename to armi/tests/smallestTestReactor/refOneBlockReactor.yaml
index e5fa2ea36..762f65375 100644
--- a/armi/tests/refOneBlockReactor.yaml
+++ b/armi/tests/smallestTestReactor/refOneBlockReactor.yaml
@@ -57,14 +57,13 @@ blocks:
op: 16.8
assemblies:
heights: &standard_heights [25.0]
- axial mesh points: &standard_axial_mesh_points [ 1]
+ axial mesh points: &standard_axial_mesh_points [1]
igniter fuel:
specifier: IC
- blocks: &igniter_fuel_blocks [ *block_fuel]
+ blocks: &igniter_fuel_blocks [*block_fuel]
height: *standard_heights
axial mesh points: *standard_axial_mesh_points
material modifications:
- U235_wt_frac: &igniter_fuel_u235_wt_frac [ 0.11]
+ U235_wt_frac: &igniter_fuel_u235_wt_frac [0.11]
ZR_wt_frac: &igniter_fuel_zr_wt_frac [0.06]
xs types: &igniter_fuel_xs_types [A]
-
diff --git a/armi/tests/smallestTestReactor/refSmallestReactor.yaml b/armi/tests/smallestTestReactor/refSmallestReactor.yaml
new file mode 100644
index 000000000..8b9396b5e
--- /dev/null
+++ b/armi/tests/smallestTestReactor/refSmallestReactor.yaml
@@ -0,0 +1,21 @@
+!include refOneBlockReactor.yaml
+systems:
+ core:
+ grid name: core
+ origin:
+ x: 0.0
+ y: 0.0
+ z: 0.0
+ Spent Fuel Pool:
+ type: sfp
+ grid name: sfp
+ origin:
+ x: 5000.0
+ y: 5000.0
+ z: 6000.0
+grids:
+ core:
+ geom: hex_corners_up
+ lattice map: |
+ IC
+ symmetry: full
\ No newline at end of file
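The !include line at the top of this new file is ARMI's textual-inclusion tag: the referenced file is spliced in before YAML parsing, so refSmallestReactor.yaml only adds the systems and grids sections on top of refOneBlockReactor.yaml. A sketch of resolving the inclusion by hand, assuming the armi.utils.textProcessors.resolveMarkupInclusions helper (the exact name and signature are an assumption for illustration):

import pathlib

from armi.utils import textProcessors

src = pathlib.Path("armi/tests/smallestTestReactor/refSmallestReactor.yaml")
with open(src) as stream:
    # Expand !include tags relative to root and return a file-like object.
    resolved = textProcessors.resolveMarkupInclusions(stream, root=src.parent)
print(resolved.read())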
diff --git a/armi/tests/test_context.py b/armi/tests/test_context.py
new file mode 100644
index 000000000..69e737fd3
--- /dev/null
+++ b/armi/tests/test_context.py
@@ -0,0 +1,37 @@
+# Copyright 2024 TerraPower, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Serial tests for the Context module."""
+import unittest
+
+from armi import context
+
+
+class TestContextSerial(unittest.TestCase):
+ """Serial tests for the Context module."""
+
+ @unittest.skipIf(context.MPI_SIZE > 1, "Serial test only")
+ def test_rank(self):
+ self.assertEqual(context.MPI_RANK, 0)
+ self.assertEqual(context.MPI_SIZE, 1)
+
+ @unittest.skipIf(context.MPI_SIZE > 1, "Serial test only")
+ def test_nonNoneData(self):
+ self.assertGreater(len(context.APP_DATA), 0)
+ self.assertGreater(len(context.DOC), 0)
+ self.assertGreater(len(context.getFastPath()), 0)
+ self.assertGreater(len(context.PROJECT_ROOT), 0)
+ self.assertGreater(len(context.RES), 0)
+ self.assertGreater(len(context.ROOT), 0)
+ self.assertGreater(len(context.USER), 0)
diff --git a/armi/tests/test_lwrInputs.py b/armi/tests/test_lwrInputs.py
index 93c10a111..1207ea472 100644
--- a/armi/tests/test_lwrInputs.py
+++ b/armi/tests/test_lwrInputs.py
@@ -16,7 +16,7 @@
import os
import unittest
-import numpy
+import numpy as np
from armi import Mode
from armi import runLog
@@ -61,11 +61,8 @@ def test_loadC5G7(self):
# test warnings are being logged for malformed isotopics info in the settings file
streamVal = mock.getStdout()
- self.assertGreater(streamVal.count("[warn]"), 32, msg=streamVal)
- self.assertGreater(streamVal.count("custom isotopics"), 32, msg=streamVal)
self.assertIn("UraniumOxide", streamVal, msg=streamVal)
self.assertIn("SaturatedWater", streamVal, msg=streamVal)
- self.assertIn("invalid settings: fakeBad", streamVal, msg=streamVal)
# test that there are 100 of each high, medium, and low MOX pins
fuelPinsHigh = b.getComponent(Flags.HIGH | Flags.MOX)
@@ -97,4 +94,4 @@ def loadLocs(o, locs):
for indices, coordsInput in sorted(locsInput.items()):
coordsDB = locsDB[indices]
- self.assertTrue(numpy.allclose(coordsInput, coordsDB))
+ self.assertTrue(np.allclose(coordsInput, coordsDB))
diff --git a/armi/tests/test_mpiActions.py b/armi/tests/test_mpiActions.py
index b43ae54c2..35bea5007 100644
--- a/armi/tests/test_mpiActions.py
+++ b/armi/tests/test_mpiActions.py
@@ -26,7 +26,6 @@
from armi import context
from armi.reactor.tests import test_reactors
from armi.tests import mockRunLogs
-from armi.tests import TEST_ROOT
from armi.utils import iterables
@@ -126,7 +125,9 @@ def test_runActionsDistributionAction(self):
allObjs = list(range(numObjs))
objs = self._distributeObjects(allObjs, numProcs)
- o, r = test_reactors.loadTestReactor(TEST_ROOT)
+ o, r = test_reactors.loadTestReactor(
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
act = DistributionAction([self.action])
act.invokeHook = passer
@@ -139,7 +140,9 @@ def test_runActionsDistributeStateAction(self):
allObjs = list(range(numObjs))
objs = self._distributeObjects(allObjs, numProcs)
- o, r = test_reactors.loadTestReactor(TEST_ROOT)
+ o, r = test_reactors.loadTestReactor(
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
act = DistributeStateAction([self.action])
act.invokeHook = passer
@@ -153,7 +156,9 @@ def test_diagnosePickleErrorTestReactor(self):
We expect this to run all the way through the pickle diagnoser,
because the test reactor should be easily picklable.
"""
- o, _ = test_reactors.loadTestReactor(TEST_ROOT)
+ o, _ = test_reactors.loadTestReactor(
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
with mockRunLogs.BufferLog() as mock:
self.assertEqual("", mock.getStdout())
diff --git a/armi/tests/test_mpiFeatures.py b/armi/tests/test_mpiFeatures.py
index abf16fa10..3682d5687 100644
--- a/armi/tests/test_mpiFeatures.py
+++ b/armi/tests/test_mpiFeatures.py
@@ -24,9 +24,9 @@
or
mpiexec.exe -n 2 python -m pytest armi/tests/test_mpiFeatures.py
"""
-from distutils.spawn import find_executable
from unittest.mock import patch
import os
+import shutil
import unittest
from armi import context
@@ -47,9 +47,9 @@
# determine if this is a parallel run, and MPI is installed
MPI_EXE = None
-if find_executable("mpiexec.exe") is not None:
+if shutil.which("mpiexec.exe") is not None:
MPI_EXE = "mpiexec.exe"
-elif find_executable("mpiexec") is not None:
+elif shutil.which("mpiexec") is not None:
MPI_EXE = "mpiexec"
MPI_COMM = context.MPI_COMM
@@ -91,11 +91,20 @@ def workerOperate(self, cmd):
return False
+class MockInterface(Interface):
+ name = "mockInterface"
+
+ def interactInit(self):
+ pass
+
+
class MpiOperatorTests(unittest.TestCase):
"""Testing the MPI parallelization operator."""
def setUp(self):
- self.old_op, self.r = test_reactors.loadTestReactor(TEST_ROOT)
+ self.old_op, self.r = test_reactors.loadTestReactor(
+ TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
self.o = OperatorMPI(cs=self.old_op.cs)
self.o.r = self.r
@@ -140,6 +149,31 @@ def test_primaryCritical(self):
else:
self.o.operate()
+ @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only")
+ def test_finalizeInteract(self):
+ """Test to make sure workers are reset after interface interactions."""
+ # Add a mock interface so there is an interface interaction to reset
+ interface = MockInterface(self.o.r, self.o.cs)
+ self.o.addInterface(interface)
+
+ with mockRunLogs.BufferLog() as mock:
+ if context.MPI_RANK == 0:
+ self.o.interactAllInit()
+ context.MPI_COMM.bcast("quit", root=0)
+ context.MPI_COMM.bcast("finished", root=0)
+ else:
+ self.o.workerOperate()
+
+ logMessage = (
+ "Workers have been reset."
+ if context.MPI_RANK == 0
+ else "Workers are being reset."
+ )
+ numCalls = len(
+ [line for line in mock.getStdout().splitlines() if logMessage in line]
+ )
+ self.assertGreaterEqual(numCalls, 1)
+
# these two must be defined up here so that they can be pickled
class BcastAction1(mpiActions.MpiAction):
@@ -317,3 +351,21 @@ def test_cleanPathMpi(self):
pathTools.cleanPath(dir3, mpiRank=context.MPI_RANK)
MPI_COMM.barrier()
self.assertFalse(os.path.exists(dir3))
+
+
+class TestContextMpi(unittest.TestCase):
+ """Parallel tests for the Context module."""
+
+ @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only")
+ def test_rank(self):
+ self.assertGreater(context.MPI_RANK, -1)
+
+ @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only")
+ def test_nonNoneData(self):
+ self.assertGreater(len(context.APP_DATA), 0)
+ self.assertGreater(len(context.DOC), 0)
+ self.assertGreater(len(context.getFastPath()), 0)
+ self.assertGreater(len(context.PROJECT_ROOT), 0)
+ self.assertGreater(len(context.RES), 0)
+ self.assertGreater(len(context.ROOT), 0)
+ self.assertGreater(len(context.USER), 0)
diff --git a/armi/tests/test_mpiParameters.py b/armi/tests/test_mpiParameters.py
index a5a6bd5e9..4754a7690 100644
--- a/armi/tests/test_mpiParameters.py
+++ b/armi/tests/test_mpiParameters.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests of the MPI portion of the Parameters class."""
-from distutils.spawn import find_executable
+import shutil
import unittest
from armi import context
@@ -21,9 +21,9 @@
# determine if this is a parallel run, and MPI is installed
MPI_EXE = None
-if find_executable("mpiexec.exe") is not None:
+if shutil.which("mpiexec.exe") is not None:
MPI_EXE = "mpiexec.exe"
-elif find_executable("mpiexec") is not None:
+elif shutil.which("mpiexec") is not None:
MPI_EXE = "mpiexec"
diff --git a/armi/tests/test_plugins.py b/armi/tests/test_plugins.py
index d63b4957f..209fe7336 100644
--- a/armi/tests/test_plugins.py
+++ b/armi/tests/test_plugins.py
@@ -26,6 +26,7 @@
from armi import plugins
from armi import settings
from armi import utils
+from armi.reactor.converters.axialExpansionChanger import AxialExpansionChanger
from armi.physics.neutronics import NeutronicsPlugin
from armi.reactor.blocks import Block
from armi.reactor.flags import Flags
@@ -42,13 +43,27 @@ def defineFlags():
return {"SUPER_FLAG": utils.flags.auto()}
+class SillyAxialExpansionChanger(AxialExpansionChanger):
+ """Fake, test-specific axial expansion changer that a plugin will register."""
+
+
+class SillyAxialPlugin(plugins.ArmiPlugin):
+ """Trivial plugin that implements the axial expansion hook."""
+
+ @staticmethod
+ @plugins.HOOKIMPL
+ def getAxialExpansionChanger() -> type[SillyAxialExpansionChanger]:
+ return SillyAxialExpansionChanger
+
+
class TestPluginRegistration(unittest.TestCase):
def setUp(self):
"""
Manipulate the standard App. We can't just configure our own, since the
pytest environment bleeds between tests.
"""
- self._backupApp = deepcopy(getApp())
+ self.app = getApp()
+ self._backupApp = deepcopy(self.app)
def tearDown(self):
"""Restore the App to its original state."""
@@ -92,6 +107,18 @@ def test_defineFlags(self):
# show the flag exists now
self.assertEqual(type(Flags.SUPER_FLAG._value), int)
+ def test_axialExpansionHook(self):
+ """Test that plugins can override the axial expansion of assemblies via a hook."""
+ pm = self.app.pluginManager
+ first = pm.hook.getAxialExpansionChanger()
+ # By default, make sure we get the armi-shipped expansion class
+ self.assertIs(first, AxialExpansionChanger)
+ pm.register(SillyAxialPlugin)
+ second = pm.hook.getAxialExpansionChanger()
+ # Registering a plugin that implements the hook means we get
+ # that plugin's axial expander
+ self.assertIs(second, SillyAxialExpansionChanger)
+
class TestPluginBasics(unittest.TestCase):
def test_defineParameters(self):
@@ -153,7 +180,9 @@ def test_pluginsExposeInterfaces(self):
:tests: R_ARMI_PLUGIN_INTERFACES
"""
    # generate a test operator, with a full set of interfaces from plugins
- o = loadTestReactor(TEST_ROOT)[0]
+ o = loadTestReactor(
+ TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )[0]
pm = getPluginManagerOrFail()
# test the plugins were generated
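
The hook call in ``test_axialExpansionHook`` returns a single class rather than a list of results, so the hookspec is evidently declared ``firstresult``; the most recently registered implementation wins. A sketch of how a downstream plugin would use the new hook (``MyAxialExpansionChanger`` and ``MyDownstreamPlugin`` are hypothetical stand-ins)::

    from armi import plugins
    from armi.reactor.converters.axialExpansionChanger import AxialExpansionChanger

    class MyAxialExpansionChanger(AxialExpansionChanger):
        """Project-specific expansion rules would go here."""

    class MyDownstreamPlugin(plugins.ArmiPlugin):
        @staticmethod
        @plugins.HOOKIMPL
        def getAxialExpansionChanger():
            return MyAxialExpansionChanger

    # After getApp().pluginManager.register(MyDownstreamPlugin), the call
    # pm.hook.getAxialExpansionChanger() returns MyAxialExpansionChanger.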
diff --git a/armi/tests/test_runLog.py b/armi/tests/test_runLog.py
index c79bfd139..ddfc9198f 100644
--- a/armi/tests/test_runLog.py
+++ b/armi/tests/test_runLog.py
@@ -12,11 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests of the runLog tooling."""
-from io import StringIO
-from shutil import rmtree
import logging
import os
import unittest
+from io import StringIO
+from shutil import rmtree
from armi import runLog
from armi.tests import mockRunLogs
@@ -388,8 +388,16 @@ def test_concatenateLogs(self):
with open(stdoutFile2, "w") as f:
f.write("hello other world\n")
+ # verify behavior for a corner case
+ stdoutFile3 = os.path.join(
+ logDir, "{}..0000.stdout".format(runLog.STDOUT_LOGGER_NAME)
+ )
+ with open(stdoutFile3, "w") as f:
+ f.write("hello world again\n")
+
self.assertTrue(os.path.exists(stdoutFile1))
self.assertTrue(os.path.exists(stdoutFile2))
+ self.assertTrue(os.path.exists(stdoutFile3))
# create a stderr file
stderrFile = os.path.join(
@@ -408,8 +416,22 @@ def test_concatenateLogs(self):
self.assertTrue(os.path.exists(combinedLogFile))
self.assertFalse(os.path.exists(stdoutFile1))
self.assertFalse(os.path.exists(stdoutFile2))
+ self.assertFalse(os.path.exists(stdoutFile3))
self.assertFalse(os.path.exists(stderrFile))
+ # verify behavior for a corner case
+ stdoutFile3 = os.path.join(
+ logDir, "{}..0000.stdout".format(runLog.STDOUT_LOGGER_NAME)
+ )
+ with open(stdoutFile3, "w") as f:
+ f.write("hello world again\n")
+ # concat logs
+ runLog.concatenateLogs(logDir=logDir)
+ # verify output
+ combinedLogFile = os.path.join(logDir, "armi-workers-mpi.log")
+ self.assertTrue(os.path.exists(combinedLogFile))
+ self.assertFalse(os.path.exists(stdoutFile3))
+
def test_createLogDir(self):
"""Test the createLogDir() method.
diff --git a/armi/tests/test_user_plugins.py b/armi/tests/test_user_plugins.py
index 526797e5b..0840afd58 100644
--- a/armi/tests/test_user_plugins.py
+++ b/armi/tests/test_user_plugins.py
@@ -244,7 +244,9 @@ def test_userPluginOnProcessCoreLoading(self):
plug0 = [p[1] for p in pluginz if p[0] == name][0]
# load a reactor and grab the fuel assemblies
- o, r = test_reactors.loadTestReactor(TEST_ROOT)
+ o, r = test_reactors.loadTestReactor(
+ TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
fuels = r.core.getBlocks(Flags.FUEL)
# prove that our plugin affects the core in the desired way
@@ -269,7 +271,9 @@ def test_userPluginWithInterfaces(self):
self.assertIn("UserPluginWithInterface", pluginNames)
    # load a reactor and grab the fuel assemblies
- o, r = test_reactors.loadTestReactor(TEST_ROOT)
+ o, r = test_reactors.loadTestReactor(
+ TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
_fuels = r.core.getAssemblies(Flags.FUEL)
# This is here because we have multiple tests altering the App()
diff --git a/armi/tests/tutorials/anl-afci-177-blueprints.yaml b/armi/tests/tutorials/anl-afci-177-blueprints.yaml
index 7ba1f077a..23d77ebdf 100644
--- a/armi/tests/tutorials/anl-afci-177-blueprints.yaml
+++ b/armi/tests/tutorials/anl-afci-177-blueprints.yaml
@@ -9,7 +9,7 @@ blocks:
Tinput: 25.0
Thot: 450.0
id: 0.6962
- od: 0.808
+ od: 0.808
mult: 271
# end-block-clad
wire:
@@ -167,7 +167,7 @@ blocks:
Tinput: 25.0
Thot: 450.0
id: 0.6962
- od: 0.808
+ od: 0.808
mult: 271
wire:
shape: Helix
@@ -308,38 +308,37 @@ assemblies:
blocks: [*block_reflector]
height: [307.74]
axial mesh points: [1]
- xs types: [A]
+ xs types: [A]
# end-assemblies-rr
radial shield:
specifier: SH
blocks: [*block_shield]
height: [307.74]
axial mesh points: [1]
- xs types: [A]
+ xs types: [A]
# end-assemblies-sh
control:
specifier: PC
blocks: [*block_control]
height: [307.74]
axial mesh points: [1]
- xs types: [A]
+ xs types: [A]
ultimate shutdown:
specifier: US
blocks: [*block_control]
height: [307.74]
axial mesh points: [1]
- xs types: [A]
+ xs types: [A]
# end-assemblies-section
systems:
core:
- grid name: core
- origin:
- x: 0.0
- y: 0.0
- z: 0.0
+ grid name: core
+ origin:
+ x: 0.0
+ y: 0.0
+ z: 0.0
grids:
core:
!include anl-afci-177-coreMap.yaml
# end-systems-section
-
diff --git a/armi/tests/tutorials/anl-afci-177.yaml b/armi/tests/tutorials/anl-afci-177.yaml
index 3fdc5efce..02f8c512a 100644
--- a/armi/tests/tutorials/anl-afci-177.yaml
+++ b/armi/tests/tutorials/anl-afci-177.yaml
@@ -1,5 +1,5 @@
# This file is part of the walthrough_inputs tutorial in ARMI, which
-# uses .. literalinclude to bring in sections of this file. Thus,
+# uses .. literalinclude to bring in sections of this file. Thus,
# the comments and order are important. These will get wiped out
# if you load and re-write a settings file via the ARMI gui, unfortunately.
# begin-settings
diff --git a/armi/tests/tutorials/c5g7-blueprints.yaml b/armi/tests/tutorials/c5g7-blueprints.yaml
index 29d8a9b26..f2428b0dc 100644
--- a/armi/tests/tutorials/c5g7-blueprints.yaml
+++ b/armi/tests/tutorials/c5g7-blueprints.yaml
@@ -1,8 +1,9 @@
# Simple description of the C5G7 benchmark problem
-# General description from: https://www.oecd-nea.org/science/docs/2003/nsc-doc2003-16.pdf
-# Composition/dimensions description from: https://www.oecd-nea.org/science/docs/1996/nsc-doc96-02-rev2.pdf
+# General description from: https://www.oecd-nea.org/upload/docs/application/pdf/2019-12/nsc-doc2003-16.pdf
+# Composition/dimensions description from: https://www.oecd-nea.org/upload/docs/application/pdf/2020-01/nsc-doc96-02-rev2.pdf
# start-custom-isotopics
custom isotopics:
+ # NEA/NSC/DOC(96)2 Table 2 - Isotopic Distributions for each medium
mox low: # 4.3%
input format: number densities
U235: 5.00E-5
@@ -45,45 +46,40 @@ custom isotopics:
input format: number densities
H: 6.70e-2
O: 3.35E-2
- B: 2.87E-5
+ B: 2.78E-5
Zr clad:
input format: number densities
ZR: 4.30E-2
Al clad:
input format: number densities
- AL: 6.00e-2
+ AL27: 6.00e-2
fission chamber:
- # Fission Chamber composition isn't well defined so this is a guess...
- # U235 density is based on Macros of UO2 for fission XS in group 3. Group 3
- # was chosen because it is below the U-238 fast fission cutoff, but also the
- # U-235 fission cross section is not high enough the self shielding will play
- # a large role. This assuming U235 is the only fissioning isotope. Assuming the
- # fission chamber is ~90% enriched (90.9% by atom but wanted only 1 sig fig).
- # Had very similar macro absorption XS as guide tube, so using same
- # Al and water with just a little U.
+ # NEA/NSC/DOC(96)2 Documents:
+ # "Central guide tube contains: moderator (as defined in Table 2)
+ # and 1.0E-8 at/(b cm) of U-235"
input format: number densities
H: 6.70e-2
O: 3.35E-2
- B: 2.87E-5
- U235: 6.0e-8
- U238: 6.0e-9
+ B: 2.78E-5
+ U235: 1.0e-8
# end-custom-isotopics
blocks:
uo2: &block_uo2
+ # NEA/NSC/DOC(96)2 Table 1 - Cell geometries
grid name: UO2 grid
fuel:
shape: Circle
material: UO2
isotopics: UO2
- Tinput: 25.0
- Thot: 25.0
+ Tinput: 20.0
+ Thot: 20.0
od: .8190
latticeIDs: [U]
gap 1: &fuel_gap_1
shape: Circle
material: Void
- Tinput: 25.0
- Thot: 25.0
+ Tinput: 20.0
+ Thot: 20.0
id: fuel.od
od: zirconium clad.id
latticeIDs: [U]
@@ -91,16 +87,16 @@ blocks:
shape: Circle
material: Custom
isotopics: Zr clad
- Tinput: 25.0
- Thot: 25.0
+ Tinput: 20.0
+ Thot: 20.0
id: .8360
od: .9500
latticeIDs: [U]
gap 2: &fuel_gap_2
shape: Circle
material: Void
- Tinput: 25.0
- Thot: 25.0
+ Tinput: 20.0
+ Thot: 20.0
id: zirconium clad.od
od: aluminum clad.id
latticeIDs: [U]
@@ -111,8 +107,8 @@ blocks:
shape: Circle
material: Custom
isotopics: Al clad
- Tinput: 25.0
- Thot: 25.0
+ Tinput: 20.0
+ Thot: 20.0
id: .9700
od: 1.0800
latticeIDs: [U]
@@ -122,21 +118,21 @@ blocks:
isotopics: moderator
Tinput: 450.0
Thot: 450.0
- # Moderator within the guide tube
+ # Moderator within the guide tube
inner moderator guide tube: &guide_tube_moderator
shape: Circle
material: SaturatedWater
isotopics: moderator
- Tinput: 25.0
- Thot: 25.0
+ Tinput: 20.0
+ Thot: 20.0
od: guide tube.id
latticeIDs: [GT]
guide tube: &guide_tube
shape: Circle
material: Custom
isotopics: Al clad
- Tinput: 25.0
- Thot: 25.0
+ Tinput: 20.0
+ Thot: 20.0
id: .6800
od: 1.0800
latticeIDs: [GT]
@@ -153,8 +149,8 @@ blocks:
shape: Circle
material: Custom
isotopics: fission chamber
- Tinput: 25.0
- Thot: 25.0
+ Tinput: 20.0
+ Thot: 20.0
      od: .8190 # No documentation of fission chamber dims or composition
latticeIDs: [FC]
inner moderator FC: &fission_chamber_mod
@@ -162,9 +158,8 @@ blocks:
# has same od as fuel, so there needs to be something in the gap.
shape: Circle
material: Void
- isotopics: moderator
- Tinput: 25.0
- Thot: 25.0
+ Tinput: 20.0
+ Thot: 20.0
id: fission chamber.od
od: guide tube.id
latticeIDs: [FC]
@@ -172,14 +167,14 @@ blocks:
# dummy component for assembly sizing
shape: Square
material: Void
- Tinput: 25.0
- Thot: 25.0
+ Tinput: 20.0
+ Thot: 20.0
widthInner: 21.42
widthOuter: 21.42
mult: 1.0
- latticeIDs: [FC]
- # latticeIDs is needed to ensure that the center of the bounding
- # Component shares the same center/origin as the rest of the
+ latticeIDs: [FC]
+ # latticeIDs is needed to ensure that the center of the bounding
+ # Component shares the same center/origin as the rest of the
      # components in the block. See #332.
# end-block-uo2
mox: &block_mox
@@ -188,24 +183,24 @@ blocks:
shape: Circle
material: UO2
isotopics: mox low
- Tinput: 25.0
- Thot: 25.0
+ Tinput: 20.0
+ Thot: 20.0
od: .8190
latticeIDs: [ML]
mox medium fuel:
shape: Circle
material: UO2
isotopics: mox medium
- Tinput: 25.0
- Thot: 25.0
+ Tinput: 20.0
+ Thot: 20.0
od: .8190
latticeIDs: [MM]
mox high fuel:
shape: Circle
material: UO2
isotopics: mox high
- Tinput: 25.0
- Thot: 25.0
+ Tinput: 20.0
+ Thot: 20.0
od: .8190
latticeIDs: [MH]
void 1:
@@ -235,8 +230,8 @@ blocks:
shape: Square
material: SaturatedWater
isotopics: moderator
- Tinput: 25.0
- Thot: 25.0
+ Tinput: 20.0
+ Thot: 20.0
widthOuter: 21.42
mult: 1.0
# end-block-mod
@@ -262,7 +257,7 @@ assemblies:
- *block_mod
height: *heights
axial mesh points: *mesh
- xs types: [A,A,A,A]
+ xs types: [A, A, A, A]
mox:
flags: fuel
specifier: MOX
@@ -273,7 +268,7 @@ assemblies:
- *block_mod
height: *heights
axial mesh points: *mesh
- xs types: [A,A,A,A]
+ xs types: [A, A, A, A]
mod:
specifier: MOD
blocks:
@@ -283,16 +278,16 @@ assemblies:
- *block_mod
height: *heights
axial mesh points: *mesh
- xs types: [A,A,A,A]
+ xs types: [A, A, A, A]
# end-assemblies
systems:
core:
- grid name: core
+ grid name: core
- origin:
- x: 0.0
- y: 0.0
- z: 0.0
+ origin:
+ x: 0.0
+ y: 0.0
+ z: 0.0
# end-systems
grids:
core:
@@ -358,7 +353,7 @@ grids:
# end-grid-MOX
nuclide flags:
H: {burn: false, xs: true}
- O:
+ O:
burn: false
xs: true
expandTo: ["O16", "O17"] # O18 is not in many nuclear data sets.
diff --git a/armi/tests/tutorials/c5g7-settings.yaml b/armi/tests/tutorials/c5g7-settings.yaml
index 9abc3e902..815c246a1 100644
--- a/armi/tests/tutorials/c5g7-settings.yaml
+++ b/armi/tests/tutorials/c5g7-settings.yaml
@@ -1,16 +1,20 @@
settings:
+# global
availabilityFactor: 0.9
- fakeBad: 123456
- power: 1000000000.0
- cycleLength: 411.11
- loadingFile: c5g7-blueprints.yaml
- nCycles: 10
- burnSteps: 2
buGroups:
- 100
+ burnSteps: 2
comment: C5G7 LWR Benchmark inputs
- genXS: Neutron
+ cycleLength: 411.11
+ loadingFile: c5g7-blueprints.yaml
+ nCycles: 10
numProcessors: 1
- genReports: false
+ power: 1000000000.0
versions:
armi: uncontrolled
+
+# neutronics
+ genXS: Neutron
+
+# report
+ genReports: false
diff --git a/armi/tests/zpprTest.yaml b/armi/tests/zpprTest.yaml
index 2e54d5543..51a6bb813 100644
--- a/armi/tests/zpprTest.yaml
+++ b/armi/tests/zpprTest.yaml
@@ -1,10 +1,24 @@
metadata:
version: uncontrolled
settings:
+# global
+ Tin: 20.0
+ Tout: 20.0
buGroups:
- 100
burnSteps: 0
comment: ZPPR test case
+ cycleLength: 365.25
+ geomFile: zpprTestGeom.xml
+ loadingFile: 1DslabXSByCompTest.yaml
+ mpiTasksPerNode: 6
+ numProcessors: 12
+ outputFileExtension: pdf
+ power: 75000000.0
+  sortReactor: false # ZPPRs don't sort the right way; need better component sorting for slabs...
+ verbosity: extra
+
+# cross section
crossSectionControl:
AA:
geometry: 1D slab
@@ -27,21 +41,8 @@ settings:
numInternalRings: 1
numExternalRings: 1
meshSubdivisionsPerCm: 10
- cycleLength: 365.25
- economics: false
- epsCyclic: 1.0
+
+# neutronics
epsEig: 1e-10
genXS: Neutron
- geomFile: zpprTestGeom.xml
- loadingFile: 1DslabXSByCompTest.yaml
- max2SigmaCladIDT: 630.0
- maxFlowZones: 12
- mpiTasksPerNode: 6
- numProcessors: 12
- outputFileExtension: pdf
- power: 75000000.0
- sortReactor: false # zpprs dont sor the right way. need better component sorting for slab...
- Tin: 20.0
- Tout: 20.0
- verbosity: extra
xsBlockRepresentation: ComponentAverage1DSlab
diff --git a/armi/utils/__init__.py b/armi/utils/__init__.py
index 2c8cfbb1d..f5943d02a 100644
--- a/armi/utils/__init__.py
+++ b/armi/utils/__init__.py
@@ -29,8 +29,8 @@
from armi import runLog
from armi.utils import iterables
-from armi.utils.flags import Flag # noqa: unused-import
-from armi.utils.mathematics import * # noqa: undefined-local-with-import-star
+from armi.utils.flags import Flag # noqa: F401
+from armi.utils.mathematics import * # noqa: F403
# Read in file 1 MB at a time to reduce memory burden of reading entire file at once
_HASH_BUFFER_SIZE = 1024 * 1024
@@ -470,7 +470,7 @@ def tryPickleOnAllContents(obj, ignore=None, verbose=False):
print("Checking {0}...".format(name))
try:
pickle.dumps(ob) # dump as a string
- except: # noqa: bare-except
+ except Exception:
print(
"{0} in {1} cannot be pickled. It is: {2}. ".format(name, obj, ob)
)
@@ -497,7 +497,7 @@ def doTestPickleOnAllContents2(obj, ignore=None):
if name not in ignore:
try:
pickle.dumps(ob) # dump as a string
- except: # noqa: bare-except
+ except Exception:
unpickleable.append(name)
print("Cant pickle {0}".format(name))
# recursive call.
@@ -509,7 +509,7 @@ def doTestPickleOnAllContents2(obj, ignore=None):
class MyPickler(pickle.Pickler):
- r"""
+ """
This will find your pickle errors if all else fails.
Use with tryPickleOnAllContents3.
@@ -747,7 +747,7 @@ def plotMatrix(
cmap.set_bad("w")
try:
matrix = matrix.todense()
- except: # noqa: bare-except
+ except Exception:
pass
if minV:
@@ -806,17 +806,39 @@ def merge(self, *otherDictionaries) -> None:
def safeCopy(src: str, dst: str) -> None:
"""This copy overwrites ``shutil.copy`` and checks that copy operation is truly completed before continuing."""
- waitTime = 0.01 # 10 ms
+    # Normalize both paths to absolute paths
+ src = os.path.abspath(src)
+ dst = os.path.abspath(dst)
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
srcSize = os.path.getsize(src)
- shutil.copyfile(src, dst)
- shutil.copymode(src, dst)
+ if "win" in sys.platform:
+ shutil.copyfile(src, dst)
+ shutil.copymode(src, dst)
+ elif "linux" in sys.platform:
+ cmd = f'cp "{src}" "{dst}"'
+ os.system(cmd)
+ else:
+ raise OSError(
+ "Cannot perform ``safeCopy`` on files because ARMI only supports "
+ + "Linux and Windows."
+ )
+ waitTime = 0.01 # 10 ms
+ maxWaitTime = 300 # 5 min
+ totalWaitTime = 0
while True:
dstSize = os.path.getsize(dst)
if srcSize == dstSize:
break
time.sleep(waitTime)
+ totalWaitTime += waitTime
+ if totalWaitTime > maxWaitTime:
+ runLog.warning(
+ f"File copy from {dst} to {src} has failed due to exceeding "
+ + f"a maximum wait time of {maxWaitTime/60} minutes."
+ )
+ break
+
runLog.extra("Copied {} -> {}".format(src, dst))
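
With the bounded size-polling loop, callers can treat ``safeCopy`` as a drop-in replacement for ``shutil.copy`` that blocks, up to the five-minute cap, until the destination size matches the source. A minimal usage sketch with hypothetical paths::

    from armi.utils import safeCopy

    # Paths are made absolute internally; copying into a directory appends
    # the source basename, as with shutil.copy.
    safeCopy("settings.yaml", "backup/")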
diff --git a/armi/utils/codeTiming.py b/armi/utils/codeTiming.py
index b205bbe11..c01b21435 100644
--- a/armi/utils/codeTiming.py
+++ b/armi/utils/codeTiming.py
@@ -182,7 +182,7 @@ def getActiveTimers():
@staticmethod
def report(inclusion_cutoff=0.1, total_time=False):
- r"""
+ """
Write a string report of the timers.
Parameters
@@ -190,7 +190,8 @@ def report(inclusion_cutoff=0.1, total_time=False):
inclusion_cutoff : float, optional
Will not show results that have less than this fraction of the total time.
total_time : bool, optional
- Use either the ratio of total time or time since last report for consideration against the cutoff
+        Whether to compare the ratio of total time, rather than time since the last
+        report, against the cutoff.
See Also
--------
@@ -232,7 +233,8 @@ def timeline(base_file_name, inclusion_cutoff=0.1, total_time=False):
inclusion_cutoff : float, optional
Will not show results that have less than this fraction of the total time.
total_time : bool, optional
- Use either the ratio of total time or time since last report for consideration against the cutoff
+        Whether to compare the ratio of total time, rather than time since the last
+        report, against the cutoff.
"""
import matplotlib.pyplot as plt
import numpy as np
@@ -308,12 +310,13 @@ def flatMerge(
class _Timer:
- r"""Code timer to call at various points to measure performance.
+ """Code timer to call at various points to measure performance.
see MasterTimer.getTimer() for construction
"""
- _frozen = False # if the master timer stops, all timers must freeze, with no thaw (how would that make sense in a run?)
+ # If the master timer stops, all timers must freeze with no thaw.
+ _frozen = False
def __init__(self, name, start):
self.name = name
@@ -374,7 +377,7 @@ def timeSinceReport(self):
@property
def times(self):
- """List of time start and stop pairs, if active the current time is used as the last stop."""
+ """List of time start / stop pairs, if active the current time is used as the last stop."""
if self.isActive:
times = copy.deepcopy(self._times)
times[-1] = (self._times[-1][0], MasterTimer.time())
diff --git a/armi/utils/customExceptions.py b/armi/utils/customExceptions.py
index 6d3fe9550..229b31ffa 100644
--- a/armi/utils/customExceptions.py
+++ b/armi/utils/customExceptions.py
@@ -12,7 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Globally accessible exception definitions for better granularity on exception behavior and exception handling behavior."""
+"""
+Globally accessible exception definitions for better granularity on exception behavior and
+exception handling behavior.
+"""
from armi import runLog
from inspect import stack, getframeinfo
@@ -60,7 +63,7 @@ def decorated(*args, **kwargs):
def warn_when_root(func):
- r"""Decorates a method to produce a warning message only on the root node."""
+ """Decorates a method to produce a warning message only on the root node."""
return _message_when_root(warn(func))
@@ -99,15 +102,18 @@ def __init__(self, msg):
class InvalidSettingsStopProcess(SettingException):
- """Exception raised when setting file contains invalid settings and user aborts or process is uninteractive."""
+ """
+ Exception raised when setting file contains invalid settings and user aborts or process is
+ uninteractive.
+ """
def __init__(self, reader):
msg = "Input settings file {}".format(reader.inputPath)
if reader.liveVersion != reader.inputVersion:
msg += (
'\n\twas made with version "{0}" which differs from the current version "{1}." '
- 'Either create the input file with the "{1}", or switch to a development version of ARMI.'
- "".format(reader.inputVersion, reader.liveVersion)
+ 'Either create the input file with the "{1}", or switch to a development version '
+ "of ARMI.".format(reader.inputVersion, reader.liveVersion)
)
if reader.invalidSettings:
msg += (
diff --git a/armi/utils/directoryChangersMpi.py b/armi/utils/directoryChangersMpi.py
index b479722cb..85c665b82 100644
--- a/armi/utils/directoryChangersMpi.py
+++ b/armi/utils/directoryChangersMpi.py
@@ -54,6 +54,7 @@ def open(self):
def close(self):
cdma = _ChangeDirectoryMpiAction(self.initial)
+ cdma = cdma.broadcast(cdma)
cdma.invoke(None, None, None)
diff --git a/armi/utils/flags.py b/armi/utils/flags.py
index 08ca04532..1dc710e9e 100644
--- a/armi/utils/flags.py
+++ b/armi/utils/flags.py
@@ -29,7 +29,7 @@
from typing import Dict, Union, Sequence, List, Tuple
-class auto: # noqa: invalid-class-name
+class auto: # noqa: N801
"""
Empty class for requesting a lazily-evaluated automatic field value.
diff --git a/armi/utils/gridEditor.py b/armi/utils/gridEditor.py
index 31fa295ed..5bf530338 100644
--- a/armi/utils/gridEditor.py
+++ b/armi/utils/gridEditor.py
@@ -21,6 +21,11 @@
$ python -m armi grids
+If you have an existing set of input files, pass in the blueprints input file
+as the first argument and the system will load up the associated grid, e.g.::
+
+ $ python -m armi grids FFTF-blueprints.yaml
+
**Known Issues**
@@ -53,7 +58,7 @@
import wx
import wx.adv
-import numpy
+import numpy as np
import numpy.linalg
from armi.utils import hexagon
@@ -76,45 +81,45 @@
# what the Nones are for below. Future work to employ these. Colors are RGB fractions.
FLAG_STYLES = {
# Red
- Flags.FUEL: (numpy.array([1.0, 0.0, 0.0]), None),
+ Flags.FUEL: (np.array([1.0, 0.0, 0.0]), None),
# Green
- Flags.CONTROL: (numpy.array([0.0, 1.0, 0.0]), None),
+ Flags.CONTROL: (np.array([0.0, 1.0, 0.0]), None),
# Gray
- Flags.SHIELD: (numpy.array([0.4, 0.4, 0.4]), None),
+ Flags.SHIELD: (np.array([0.4, 0.4, 0.4]), None),
# Yellow
- Flags.REFLECTOR: (numpy.array([0.5, 0.5, 0.0]), None),
+ Flags.REFLECTOR: (np.array([0.5, 0.5, 0.0]), None),
# Paisley?
- Flags.INNER: (numpy.array([0.5, 0.5, 1.0]), None),
+ Flags.INNER: (np.array([0.5, 0.5, 1.0]), None),
# We shouldn't see many SECONDARY, OUTER, MIDDLE, etc. on their own, so these
# will just darken or brighten whatever color we would otherwise get)
- Flags.SECONDARY: (numpy.array([0.0, 0.0, 0.0]), None),
- Flags.OUTER: (numpy.array([0.0, 0.0, 0.0]), None),
+ Flags.SECONDARY: (np.array([0.0, 0.0, 0.0]), None),
+ Flags.OUTER: (np.array([0.0, 0.0, 0.0]), None),
# WHITE (same as above, this will just lighten anything that it accompanies)
- Flags.MIDDLE: (numpy.array([1.0, 1.0, 1.0]), None),
- Flags.ANNULAR: (numpy.array([1.0, 1.0, 1.0]), None),
- Flags.IGNITER: (numpy.array([0.2, 0.2, 0.2]), None),
- Flags.STARTER: (numpy.array([0.4, 0.4, 0.4]), None),
- Flags.FEED: (numpy.array([0.6, 0.6, 0.6]), None),
- Flags.DRIVER: (numpy.array([0.8, 0.8, 0.8]), None),
+ Flags.MIDDLE: (np.array([1.0, 1.0, 1.0]), None),
+ Flags.ANNULAR: (np.array([1.0, 1.0, 1.0]), None),
+ Flags.IGNITER: (np.array([0.2, 0.2, 0.2]), None),
+ Flags.STARTER: (np.array([0.4, 0.4, 0.4]), None),
+ Flags.FEED: (np.array([0.6, 0.6, 0.6]), None),
+ Flags.DRIVER: (np.array([0.8, 0.8, 0.8]), None),
}
# RGB weights for calculating luminance. We use this to decide whether we should put
# white or black text on top of the color. These come from CCIR 601
-LUMINANCE_WEIGHTS = numpy.array([0.3, 0.59, 0.11])
+LUMINANCE_WEIGHTS = np.array([0.3, 0.59, 0.11])
def _translationMatrix(x, y):
"""Return an affine transformation matrix representing an x- and y-translation."""
- return numpy.array([[1.0, 0.0, x], [0.0, 1.0, y], [0.0, 0.0, 1.0]])
+ return np.array([[1.0, 0.0, x], [0.0, 1.0, y], [0.0, 0.0, 1.0]])
-def _boundingBox(points: Sequence[numpy.ndarray]) -> wx.Rect:
+def _boundingBox(points: Sequence[np.ndarray]) -> wx.Rect:
"""Return the smallest wx.Rect that contains all of the passed points."""
- xmin = numpy.amin([p[0] for p in points])
- xmax = numpy.amax([p[0] for p in points])
+ xmin = np.amin([p[0] for p in points])
+ xmax = np.amax([p[0] for p in points])
- ymin = numpy.amin([p[1] for p in points])
- ymax = numpy.amax([p[1] for p in points])
+ ymin = np.amin([p[1] for p in points])
+ ymax = np.amax([p[1] for p in points])
return wx.Rect(wx.Point(int(xmin), int(ymin)), wx.Point(int(xmax), int(ymax)))
@@ -123,12 +128,12 @@ def _desaturate(c: Sequence[float]):
r, g, b = tuple(c)
hue, lig, sat = colorsys.rgb_to_hls(r, g, b)
lig = lig + (1.0 - lig) * 0.5
- return numpy.array(colorsys.hls_to_rgb(hue, lig, sat))
+ return np.array(colorsys.hls_to_rgb(hue, lig, sat))
def _getColorAndBrushFromFlags(f, bold=True):
"""Given a set of Flags, return a wx.Pen and wx.Brush with which to draw a shape."""
- c = numpy.array([0.0, 0.0, 0.0])
+ c = np.array([0.0, 0.0, 0.0])
nColors = 0
for styleFlag, style in FLAG_STYLES.items():
@@ -160,8 +165,8 @@ def _getColorAndBrushFromFlags(f, bold=True):
def _drawShape(
dc: wx.DC,
geom: geometry.GeomType,
- view: numpy.ndarray,
- model: Optional[numpy.ndarray] = None,
+ view: np.ndarray,
+ model: Optional[np.ndarray] = None,
label: str = "",
description: Optional[str] = None,
bold: bool = True,
@@ -176,9 +181,9 @@ def _drawShape(
The device context to draw to
geom: geometry.GeomType
The geometry type, which defines the shape to be drawn
- view: numpy.ndarray
+ view: np.ndarray
A 3x3 matrix defining the world transform
- model: numpy.ndarray, optional
+ model: np.ndarray, optional
A 3x3 matrix defining the model transform. No transform is made to the "unit"
shape if no model transform is provided.
label: str, optional
@@ -204,8 +209,8 @@ def _drawShape(
raise ValueError("Geom type `{}` unsupported".format(geom))
# Appending 1 to each coordinate since the transformation matrix is 3x3
- poly = numpy.array([numpy.append(vertex, 1) for vertex in primitive]).transpose()
- model = model if model is not None else numpy.eye(3)
+ poly = np.array([np.append(vertex, 1) for vertex in primitive]).transpose()
+ model = model if model is not None else np.eye(3)
poly = view.dot(model).dot(poly).transpose()
poly = [wx.Point(int(vertex[0]), int(vertex[1])) for vertex in poly]
@@ -452,15 +457,15 @@ def __init__(
for key, design in self.assemDesigns.items():
# flip y-coordinates, enlarge, offset
- flip_y = numpy.array([[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]])
- scale = numpy.array(
+ flip_y = np.array([[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]])
+ scale = np.array(
[
[UNIT_SIZE * 0.8, 0.0, 0.0],
[0.0, UNIT_SIZE * 0.8, 0.0],
[0.0, 0.0, 1.0],
]
)
- translate = numpy.array(
+ translate = np.array(
[
[1.0, 0.0, UNIT_SIZE * 0.5],
[0.0, 1.0, UNIT_SIZE * 0.5],
@@ -845,8 +850,8 @@ def drawGrid(self):
gridScale = self._gridScale(self.grid)
# flip y-coordinates, enlarge
- flip_y = numpy.array([[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]])
- scale = numpy.array(
+ flip_y = np.array([[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]])
+ scale = np.array(
[
[UNIT_SIZE / gridScale[0], 0.0, 0.0],
[0.0, UNIT_SIZE / gridScale[1], 0.0],
@@ -855,7 +860,7 @@ def drawGrid(self):
)
# uniform grid, so all shapes have the same scale
- model = numpy.array(
+ model = np.array(
[[gridScale[0], 0.0, 0.0], [0.0, gridScale[1], 0.0], [0.0, 0.0, 1.0]]
)
self.transform = flip_y.dot(scale)
@@ -881,7 +886,7 @@ def drawGrid(self):
label, description, bold = self._getLabel(idx)
- coords = numpy.array(self.grid.getCoordinates(idx))[:2]
+ coords = np.array(self.grid.getCoordinates(idx))[:2]
offset = _translationMatrix(*coords)
boundingBox = _drawShape(
@@ -1108,7 +1113,7 @@ def applyAssem(
# uniform grid, so all shapes have the same scale
gridScale = self._gridScale(self.grid)
- model = numpy.array(
+ model = np.array(
[[gridScale[0], 0.0, 0.0], [0.0, gridScale[1], 0.0], [0.0, 0.0, 1.0]]
)
@@ -1133,7 +1138,7 @@ def applyAssem(
self.pdc.ClearId(pdcId)
self.pdc.SetId(pdcId)
- coords = numpy.array(self.grid.getCoordinates(idx))
+ coords = np.array(self.grid.getCoordinates(idx))
model = _translationMatrix(*coords[0:2]).dot(model)
label, description, bold = self._getLabel(idx)
@@ -1157,11 +1162,11 @@ def _gridScale(grid):
if isinstance(grid, grids.HexGrid):
# Unit steps aren't aligned with the x,y coordinate system for Hex, so just
# use the y dimension, assuming that's the proper flat-to-flat dimension
- coordScale = numpy.array([grid._unitSteps[1][1]] * 2)
+ coordScale = np.array([grid._unitSteps[1][1]] * 2)
elif isinstance(grid, grids.CartesianGrid):
# Cartesian grids align with the GUI coordinates, so just use unit steps
# directly
- coordScale = numpy.array([grid._unitSteps[0][0], grid._unitSteps[1][1]])
+ coordScale = np.array([grid._unitSteps[0][0], grid._unitSteps[1][1]])
return coordScale
def _calcGridBounds(self) -> wx.Rect:
@@ -1180,15 +1185,13 @@ def _calcGridBounds(self) -> wx.Rect:
_ = self._gridScale(self.grid)
- allCenters = numpy.array(
- [self.grid.getCoordinates(idx)[:2] for idx in inDomain]
- )
- minXY = numpy.amin(allCenters, axis=0)
- maxXY = numpy.amax(allCenters, axis=0)
+ allCenters = np.array([self.grid.getCoordinates(idx)[:2] for idx in inDomain])
+ minXY = np.amin(allCenters, axis=0)
+ maxXY = np.amax(allCenters, axis=0)
- topRight = numpy.append([maxXY[1], maxXY[1]], 1.0)
- bottomLeft = numpy.append([minXY[0], minXY[1]], 1.0)
- nudge = numpy.array([UNIT_MARGIN, -UNIT_MARGIN, 0.0])
+ topRight = np.append([maxXY[1], maxXY[1]], 1.0)
+ bottomLeft = np.append([minXY[0], minXY[1]], 1.0)
+ nudge = np.array([UNIT_MARGIN, -UNIT_MARGIN, 0.0])
bottomRight = (self.transform.dot(topRight) + nudge).tolist()
topLeft = (self.transform.dot(bottomLeft) - nudge).tolist()
diff --git a/armi/utils/hexagon.py b/armi/utils/hexagon.py
index 3831d43be..126c55e1c 100644
--- a/armi/utils/hexagon.py
+++ b/armi/utils/hexagon.py
@@ -23,7 +23,7 @@
import math
-import numpy
+import numpy as np
SQRT3 = math.sqrt(3.0)
@@ -70,7 +70,7 @@ def corners(rotation=0):
Zero rotation implies flat-to-flat aligned with y-axis. Origin in the center.
"""
- points = numpy.array(
+ points = np.array(
[
(1.0 / (2.0 * math.sqrt(3.0)), 0.5),
(1.0 / math.sqrt(3.0), 0.0),
@@ -83,14 +83,14 @@ def corners(rotation=0):
rotation = rotation / 180.0 * math.pi
- rotation = numpy.array(
+ rotation = np.array(
[
[math.cos(rotation), -math.sin(rotation)],
[math.sin(rotation), math.cos(rotation)],
]
)
- return numpy.array([tuple(rotation.dot(point)) for point in points])
+ return np.array([tuple(rotation.dot(point)) for point in points])
def pitch(side):
@@ -145,3 +145,49 @@ def numPositionsInRing(ring):
rings is indexed to 1, i.e. the centermost position in the lattice is ``ring=1``.
"""
return (ring - 1) * 6 if ring != 1 else 1
+
+
+def totalPositionsUpToRing(ring: int) -> int:
+ """Return the number of positions in a hexagon with a given number of rings."""
+ return 1 + 3 * ring * (ring - 1)
+
+
+def getIndexOfRotatedCell(initialCellIndex: int, orientationNumber: int) -> int:
+ """Obtain a new cell number after placing a hexagon in a new orientation.
+
+ Parameters
+ ----------
+ initialCellIndex : int
+ Positive number for this cell's position in a hexagonal lattice.
+    orientationNumber : int
+        Orientation in number of 60-degree counter-clockwise rotations. An orientation
+        of zero means the first cell in each ring of a flats-up hexagon is in the upper
+ right corner.
+
+ Returns
+ -------
+ int
+ New cell number across the rotation
+
+ Raises
+ ------
+ ValueError
+ If ``initialCellIndex`` is not positive.
+ If ``orientationNumber`` is less than zero or greater than five.
+ """
+ if orientationNumber < 0 or orientationNumber > 5:
+ raise ValueError(
+ f"Orientation number must be in [0:5], got {orientationNumber}"
+ )
+ if initialCellIndex > 1:
+ if orientationNumber == 0:
+ return initialCellIndex
+ ring = numRingsToHoldNumCells(initialCellIndex)
+        totalPins = totalPositionsUpToRing(ring)
+        newPinLocation = initialCellIndex + (ring - 1) * orientationNumber
+        if newPinLocation > totalPins:
+ newPinLocation -= (ring - 1) * 6
+ return newPinLocation
+ elif initialCellIndex == 1:
+ return initialCellIndex
+ raise ValueError(f"Cell number must be positive, got {initialCellIndex}")
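
A worked example of the rotation arithmetic: ring 2 holds cells 2 through 7 (``totalPositionsUpToRing(2) == 1 + 3*2*1 == 7``), and each 60-degree counter-clockwise step advances a ring-2 cell by ``ring - 1 == 1`` position, wrapping within the ring::

    from armi.utils import hexagon

    print(hexagon.totalPositionsUpToRing(2))    # 7
    print(hexagon.getIndexOfRotatedCell(2, 1))  # 3
    print(hexagon.getIndexOfRotatedCell(7, 1))  # 2 (8 > 7, so wrap back by 6)
    print(hexagon.getIndexOfRotatedCell(1, 3))  # 1 (the center never moves)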
diff --git a/armi/utils/iterables.py b/armi/utils/iterables.py
index 8f6720410..ce204aa0a 100644
--- a/armi/utils/iterables.py
+++ b/armi/utils/iterables.py
@@ -18,6 +18,8 @@
from six.moves import filterfalse, map, xrange, filter
+import numpy as np
+
def flatten(lst):
"""Flattens an iterable of iterables by one level.
@@ -241,3 +243,27 @@ def __add__(self, other):
def __iadd__(self, other):
self.extend(Sequence(other))
return self
+
+
+def pivot(items, position: int):
+ """Pivot the items in an iterable to start at a given position.
+
+ Functionally just ``items[position:] + items[:position]`` with
+ some logic to handle numpy arrays (concatenation not summation)
+
+ Parameters
+ ----------
+ items : list or numpy.ndarray
+ Sequence to be re-ordered
+ position : int
+ Position that will be the first item in the sequence after the pivot
+
+ Returns
+ -------
+ list or numpy.ndarray
+ """
+ if isinstance(items, np.ndarray):
+ return np.concatenate((items[position:], items[:position]))
+ elif isinstance(items, list):
+ return items[position:] + items[:position]
+ raise TypeError(f"Pivoting {type(items)} not supported : {items}")
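
A short usage sketch: ``pivot`` reorders lists and numpy arrays the same way, using ``np.concatenate`` for arrays because ``+`` would add elementwise rather than join::

    import numpy as np
    from armi.utils.iterables import pivot

    print(pivot([0, 1, 2, 3, 4], 2))  # [2, 3, 4, 0, 1]
    print(pivot(np.arange(5), 2))     # array([2, 3, 4, 0, 1])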
diff --git a/armi/utils/mathematics.py b/armi/utils/mathematics.py
index afff43f4d..3169e92be 100644
--- a/armi/utils/mathematics.py
+++ b/armi/utils/mathematics.py
@@ -26,7 +26,7 @@
def average1DWithinTolerance(vals, tolerance=0.2):
"""
- Compute the average of a series of arrays with a tolerance.
+ Compute the average of a series of 1D arrays with a tolerance.
Tuned for averaging assembly meshes or block heights.
@@ -34,6 +34,13 @@ def average1DWithinTolerance(vals, tolerance=0.2):
----------
vals : 2D np.array
could be assembly x axial mesh tops or heights
+ tolerance : float
+ The accuracy to which we need to know the average.
+
+ Returns
+ -------
+ 1D np.array
+ The average of all the input 1D NumPy arrays.
"""
vals = np.array(vals)
@@ -57,11 +64,10 @@ def average1DWithinTolerance(vals, tolerance=0.2):
return avg
-def convertToSlice(x, increment=False):
+def convertToSlice(x, increment=0):
"""
- Convert a int, float, list of ints or floats, None, or slice
- to a slice. Also optionally increments that slice to make it easy to line
- up lists that don't start with 0.
+    Convert an int, float, list of ints or floats, None, or slice to a slice. Also optionally
+ increments that slice to make it easy to line up lists that don't start with 0.
    Use this with np.array (np.ndarray) types to easily get selections of its elements.
@@ -72,15 +78,17 @@ def convertToSlice(x, increment=False):
list of int: select these index numbers.
None: select all indices.
slice: select this slice
+    increment : int, optional
+        Offset added to each index in the resulting slice, to help line up lists that
+        don't start at 0.
Returns
-------
slice : slice
- Returns a slice object that can be used in an array
- like a[x] to select from its members.
- Also, the slice has its index numbers decremented by 1.
- It can also return a numpy array, which can be used
- to slice other numpy arrays in the same way as a slice.
+ Returns a slice object that can be used in an array like a[x] to select from its members.
+ Also, the slice has its index numbers decremented by 1. It can also return a numpy array,
+ which can be used to slice other numpy arrays in the same way as a slice.
Examples
--------
@@ -102,19 +110,12 @@ def convertToSlice(x, increment=False):
>>> a[utils.convertToSlice([1, 3])]
array([11, 13])
-
>>> a[utils.convertToSlice([1, 3], increment=-1)]
array([10, 12])
>>> a[utils.convertToSlice(slice(2, 3, None), increment=-1)]
array([11])
"""
- if increment is False:
- increment = 0
-
- if not isinstance(increment, int):
- raise Exception("increment must be False or an integer in utils.convertToSlice")
-
if x is None:
x = np.s_[:]
@@ -143,15 +144,11 @@ def convertToSlice(x, increment=False):
jstep = x.step
return np.s_[jstart:jstop:jstep]
-
elif isinstance(x, np.ndarray):
return np.array([i + increment for i in x])
-
else:
raise Exception(
- (
- "It is not known how to handle x type: " "{0} in utils.convertToSlice"
- ).format(type(x))
+ f"It is not known how to handle x type: {type(x)} in utils.convertToSlice"
)
@@ -200,7 +197,7 @@ def expandRepeatedFloats(repeatedList):
def findClosest(listToSearch, val, indx=False):
- r"""
+ """
Find closest item in a list.
Parameters
@@ -277,7 +274,7 @@ def getFloat(val):
try:
newVal = float(val)
return newVal
- except: # noqa: bare-except
+ except Exception:
return None
@@ -325,7 +322,7 @@ def isMonotonic(inputIter, relation):
def linearInterpolation(x0, y0, x1, y1, targetX=None, targetY=None):
- r"""
+ """
Does a linear interpolation (or extrapolation) for y=f(x).
Parameters
@@ -373,8 +370,8 @@ def minimizeScalarFunc(
method=None,
tol=1.0e-3,
):
- r"""
- Use scipy minimize with the given function, goal value, and first guess.
+ """
+ Use SciPy minimize with the given function, goal value, and first guess.
Parameters
----------
@@ -496,11 +493,15 @@ def parabolaFromPoints(p1, p2, p3):
----------
p1 : tuple
first point (x,y) coordinates
- p2,p3: tuple, second and third points.
+    p2 : tuple
+        second (x,y) point
+    p3 : tuple
+        third (x,y) point
Returns
-------
- a,b,c coefficients of y=ax^2+bx+c
+ tuple
+ 3 floats: a,b,c coefficients of y=ax^2+bx+c
"""
A = np.array(
[[p1[0] ** 2, p1[0], 1], [p2[0] ** 2, p2[0], 1], [p3[0] ** 2, p3[0], 1]]
@@ -518,7 +519,7 @@ def parabolaFromPoints(p1, p2, p3):
def parabolicInterpolation(ap, bp, cp, targetY):
- r"""
+ """
Given parabola coefficients, this interpolates the time
that would give k=targetK.
@@ -532,10 +533,13 @@ def parabolicInterpolation(ap, bp, cp, targetY):
Parameters
----------
- ap, bp,cp : floats
- coefficients of a parabola y = ap*x^2 + bp*x + cp
-
- targetK : float
+    ap : float
+        coefficient ap of the parabola y = ap*x^2 + bp*x + cp
+    bp : float
+        coefficient bp of the parabola y = ap*x^2 + bp*x + cp
+    cp : float
+        coefficient cp of the parabola y = ap*x^2 + bp*x + cp
+ targetY : float
The keff to find the cycle length of
Returns
@@ -587,13 +591,10 @@ def resampleStepwise(xin, yin, xout, avg=True):
----------
xin : list
interval points / mesh points
-
yin : list
interval values / inter-mesh values
-
xout : list
new interval points / new mesh points
-
avg : bool
By default, this is set to True, forcing the resampling to be done
        by averaging. But if this is False, the resampling will be done by
@@ -659,19 +660,19 @@ def rotateXY(x, y, degreesCounterclockwise=None, radiansCounterclockwise=None):
Parameters
----------
- x, y : array_like
- coordinates
-
+    x : float or array_like
+        X coordinate(s)
+    y : float or array_like
+        Y coordinate(s)
degreesCounterclockwise : float
Degrees to rotate in the CCW direction
-
radiansCounterclockwise : float
Radians to rotate in the CCW direction
Returns
-------
- xr, yr : array_like
- the rotated coordinates.
+ tuple
+ xr, yr: the rotated coordinates
"""
if radiansCounterclockwise is None:
radiansCounterclockwise = degreesCounterclockwise * math.pi / 180.0
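
A quick check of ``rotateXY`` against the clarified parameter docs, assuming scalar inputs are accepted as the array-like wording suggests: rotating (1, 0) by 90 degrees counter-clockwise lands on (0, 1) to within floating-point noise::

    from armi.utils.mathematics import rotateXY

    xr, yr = rotateXY(1.0, 0.0, degreesCounterclockwise=90.0)
    print(round(float(xr), 12), round(float(yr), 12))  # 0.0 1.0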
diff --git a/armi/utils/parsing.py b/armi/utils/parsing.py
index 92bc7cb73..3bd379688 100644
--- a/armi/utils/parsing.py
+++ b/armi/utils/parsing.py
@@ -61,7 +61,7 @@ def _numericSpecialBehavior(source, rt):
def parseValue(source, requestedType, allowNone=False, matchingNonetype=True):
"""Tries parse a python value, expecting input to be the right type or a string."""
# misuse prevention
- if requestedType == str:
+ if requestedType is str:
raise TypeError(
"Unreliable and unnecessary to use parseValue for strs and unicodes. "
"Given parameters are {}, {}, {}.".format(source, requestedType, allowNone)
diff --git a/armi/utils/pathTools.py b/armi/utils/pathTools.py
index d0594736c..ad1d6c575 100644
--- a/armi/utils/pathTools.py
+++ b/armi/utils/pathTools.py
@@ -25,6 +25,16 @@
from armi import context
from armi import runLog
+DO_NOT_CLEAN_PATHS = [
+ "armiruns",
+ "failedruns",
+ "mc2run",
+ "mongoose",
+ "shufflebranches",
+ "snapshot",
+ "tests",
+]
+
def armiAbsPath(*pathParts):
"""
@@ -40,7 +50,7 @@ def armiAbsPath(*pathParts):
from ccl import common_operations
return common_operations.convert_to_unc_path(result)
- except: # noqa: bare-except;reason=avoid pywin32 p.load parallel issues
+ except Exception:
return result
@@ -51,10 +61,8 @@ def copyOrWarn(fileDescription, sourcePath, destinationPath):
----------
fileDescription : str
a description of the file and/or operation being performed.
-
sourcePath : str
Path of the file to be copied.
-
destinationPath : str
Path for the copied file.
"""
@@ -185,7 +193,7 @@ def moduleAndAttributeExist(pathAttr):
userSpecifiedModule = importCustomPyModule(modulePath)
# Blanket except is okay since we are checking to see if a custom import will work.
- except: # noqa: bare-except
+ except Exception:
return False
return moduleAttributeName in userSpecifiedModule.__dict__
@@ -194,12 +202,12 @@ def moduleAndAttributeExist(pathAttr):
def cleanPath(path, mpiRank=0):
"""Recursively delete a path.
- !!! careful with this !!! It can delete the entire cluster.
+ !!! Be careful with this !!! It can delete the entire cluster.
- We add copious os.path.exists checks in case an MPI set of things is trying to delete everything at the same time.
- Always check filenames for some special flag when calling this, especially
- with full permissions on the cluster. You could accidentally delete everyone's work
- with one misplaced line! This doesn't ask questions.
+ We add copious os.path.exists checks in case an MPI set of things is trying to delete everything
+ at the same time. Always check filenames for some special flag when calling this, especially
+ with full permissions on the cluster. You could accidentally delete everyone's work with one
+ misplaced line! This doesn't ask questions.
Safety nets include an allow-list of paths.
@@ -214,15 +222,7 @@ def cleanPath(path, mpiRank=0):
if not os.path.exists(path):
return True
- for validPath in [
- "armiruns",
- "failedruns",
- "mc2run",
- "mongoose",
- "shufflebranches",
- "snapshot",
- "tests",
- ]:
+ for validPath in DO_NOT_CLEAN_PATHS:
if validPath in path.lower():
valid = True
diff --git a/armi/utils/plotting.py b/armi/utils/plotting.py
index b915ece16..8e713eeb1 100644
--- a/armi/utils/plotting.py
+++ b/armi/utils/plotting.py
@@ -37,7 +37,7 @@
import matplotlib.patches
import matplotlib.pyplot as plt
import matplotlib.text as mpl_text
-import numpy
+import numpy as np
from armi import runLog
from armi.bookkeeping import report
@@ -49,7 +49,7 @@
from armi.utils import hexagon
-LUMINANCE_WEIGHTS = numpy.array([0.3, 0.59, 0.11, 0.0])
+LUMINANCE_WEIGHTS = np.array([0.3, 0.59, 0.11, 0.0])
def colorGenerator(skippedColors=10):
@@ -128,7 +128,7 @@ def plotBlockDepthMap(
paramValsAtElevation.append(a.getBlockAtElevation(elevation).p[param])
data.append(paramValsAtElevation)
- data = numpy.array(data)
+ data = np.array(data)
fig = plt.figure(figsize=(12, 12), dpi=100)
# Make these now, so they are still referenceable after plotFaceMap.
@@ -328,11 +328,11 @@ def plotFaceMap(
"They should be equal length.".format(len(data), len(labels))
)
- collection.set_array(numpy.array(data))
+ collection.set_array(np.array(data))
if minScale or maxScale:
collection.set_clim([minScale, maxScale])
else:
- collection.norm.autoscale(numpy.array(data))
+ collection.norm.autoscale(np.array(data))
ax.add_collection(collection)
# Makes text in the center of each shape displaying the values.
@@ -343,9 +343,9 @@ def plotFaceMap(
if makeColorBar:
collection2 = PatchCollection(patches, cmap=cmapName, alpha=1.0)
if minScale and maxScale:
- collection2.set_array(numpy.array([minScale, maxScale]))
+ collection2.set_array(np.array([minScale, maxScale]))
else:
- collection2.set_array(numpy.array(data))
+ collection2.set_array(np.array(data))
if "radial" in cBarLabel:
colbar = fig.colorbar(
@@ -408,9 +408,7 @@ def plotFaceMap(
else:
plt.show()
- # don't close figure here. Have caller call plotting.close or plt.close when
- # they are done with it.
-
+ plt.close(fig)
return fName
@@ -427,7 +425,7 @@ def close(fig=None):
def _makeAssemPatches(core):
- """Return a list of assembly shaped patch for each assembly."""
+ """Return a list of assembly shaped patches for each assembly."""
patches = []
if isinstance(core.spatialGrid, grids.HexGrid):
@@ -443,8 +441,12 @@ def _makeAssemPatches(core):
for a in core:
x, y, _ = a.spatialLocator.getLocalCoordinates()
if nSides == 6:
+ if core.spatialGrid.cornersUp:
+ orientation = 0
+ else:
+ orientation = math.pi / 2.0
assemPatch = matplotlib.patches.RegularPolygon(
- (x, y), nSides, radius=pitch / math.sqrt(3), orientation=math.pi / 2.0
+ (x, y), nSides, radius=pitch / math.sqrt(3), orientation=orientation
)
elif nSides == 4:
# for rectangle x, y is defined as sides instead of center
@@ -463,7 +465,7 @@ def _setPlotValText(ax, texts, core, data, labels, labelFmt, fontSize, collectio
for a, val, label in zip(core, data, labels):
x, y, _ = a.spatialLocator.getLocalCoordinates()
cmap = collection.get_cmap()
- patchColor = numpy.asarray(cmap(collection.norm(val)))
+ patchColor = np.asarray(cmap(collection.norm(val)))
luminance = patchColor.dot(LUMINANCE_WEIGHTS)
dark = luminance < 0.5
if dark:
@@ -549,7 +551,7 @@ def legend_artist(self, _legend, orig_handle, _fontsize, handlebox):
transform=handlebox.get_transform(),
)
- luminance = numpy.array(colorRgb).dot(LUMINANCE_WEIGHTS)
+ luminance = np.array(colorRgb).dot(LUMINANCE_WEIGHTS)
dark = luminance < 0.5
if dark:
color = "white"
@@ -806,10 +808,10 @@ def plotAssemblyTypes(
ax.yaxis.set_ticks_position("left")
yBlockHeights.insert(0, 0.0)
yBlockHeights.sort()
- yBlockHeightDiffs = numpy.diff(
+ yBlockHeightDiffs = np.diff(
yBlockHeights
) # Compute differential heights between each block
- ax.set_yticks([0.0] + list(set(numpy.cumsum(yBlockHeightDiffs))))
+ ax.set_yticks([0.0] + list(set(np.cumsum(yBlockHeightDiffs))))
ax.xaxis.set_visible(False)
ax.set_title(title, y=1.03)
@@ -821,8 +823,8 @@ def plotAssemblyTypes(
if fileName:
fig.savefig(fileName)
runLog.debug("Writing assem layout {} in {}".format(fileName, os.getcwd()))
- plt.close(fig)
+ plt.close(fig)
return fig
@@ -944,10 +946,6 @@ def plotBlockFlux(core, fName=None, bList=None, peak=False, adjoint=False, bList
bList2 :
a separate list of blocks that will also be plotted on a separate axis on the same plot.
This is useful for comparing flux in some blocks with flux in some other blocks.
-
- Notes
- -----
- This is not a great method. It should be cleand up and migrated into ``utils.plotting``.
"""
class BlockListFlux:
@@ -964,13 +962,13 @@ def __init__(
self.E = None
if not blockList:
- self.avgFlux = numpy.zeros(self.nGroup)
- self.peakFlux = numpy.zeros(self.nGroup)
+ self.avgFlux = np.zeros(self.nGroup)
+ self.peakFlux = np.zeros(self.nGroup)
self.lineAvg = "-"
self.linePeak = "-"
else:
- self.avgFlux = numpy.zeros(self.nGroup)
- self.peakFlux = numpy.zeros(self.nGroup)
+ self.avgFlux = np.zeros(self.nGroup)
+ self.peakFlux = np.zeros(self.nGroup)
if self.adjoint:
self.labelAvg = "Average Adjoint Flux"
@@ -988,8 +986,8 @@ def __init__(
def calcAverage(self):
for b in self.blockList:
- thisFlux = numpy.array(b.getMgFlux(adjoint=self.adjoint))
- self.avgFlux += numpy.array(thisFlux)
+ thisFlux = np.array(b.getMgFlux(adjoint=self.adjoint))
+ self.avgFlux += np.array(thisFlux)
if sum(thisFlux) > sum(self.peakFlux):
self.peakFlux = thisFlux
@@ -1022,7 +1020,7 @@ def getTable(self):
try:
G = len(core.lib.neutronEnergyUpperBounds)
- except: # noqa: bare-except
+ except Exception:
runLog.warning("No ISOTXS library attached so no flux plots.")
return
@@ -1089,7 +1087,6 @@ def getTable(self):
if fName:
plt.savefig(fName)
- plt.close()
report.setData(
"Flux Plot {}".format(os.path.split(fName)[1]),
os.path.abspath(fName),
@@ -1098,6 +1095,8 @@ def getTable(self):
else:
plt.show()
+ plt.close()
+
def makeHistogram(x, y):
"""
@@ -1110,8 +1109,8 @@ def makeHistogram(x, y):
+ "len(x) == {} and len(y) == {}".format(len(x), len(y))
)
n = len(x)
- xHistogram = numpy.zeros(2 * n)
- yHistogram = numpy.zeros(2 * n)
+ xHistogram = np.zeros(2 * n)
+ yHistogram = np.zeros(2 * n)
for i in range(n):
lower = 2 * i
upper = 2 * i + 1
@@ -1194,9 +1193,8 @@ def _makeBlockPinPatches(block, cold):
)
else:
raise TypeError(
- "Shape of the pitch-defining element is not a Square or Hex it is {}, cannot plot for this type of block".format(
- comp.shape
- )
+ "Shape of the pitch-defining element is not a Square or Hex it is "
+ f"{comp.shape}, cannot plot for this type of block."
)
patches.append(derivedPatch)
data.append(material)
@@ -1281,10 +1279,10 @@ def _makeComponentPatch(component, position, cold):
)
elif isinstance(component, Hexagon):
if component.getDimension("ip", cold=cold) != 0:
- innerPoints = numpy.array(
+ innerPoints = np.array(
hexagon.corners(30) * component.getDimension("ip", cold=cold)
)
- outerPoints = numpy.array(
+ outerPoints = np.array(
hexagon.corners(30) * component.getDimension("op", cold=cold)
)
blockPatch = []
@@ -1305,7 +1303,7 @@ def _makeComponentPatch(component, position, cold):
elif isinstance(component, Rectangle):
if component.getDimension("widthInner", cold=cold) != 0:
- innerPoints = numpy.array(
+ innerPoints = np.array(
[
[
x + component.getDimension("widthInner", cold=cold) / 2,
@@ -1326,7 +1324,7 @@ def _makeComponentPatch(component, position, cold):
]
)
- outerPoints = numpy.array(
+ outerPoints = np.array(
[
[
x + component.getDimension("widthOuter", cold=cold) / 2,
@@ -1407,15 +1405,13 @@ def plotBlockDiagram(
if materialName not in materialList:
materialList.append(materialName)
- materialMap = {
- material: ai for ai, material in enumerate(numpy.unique(materialList))
- }
+ materialMap = {material: ai for ai, material in enumerate(np.unique(materialList))}
patches, data, _ = _makeBlockPinPatches(block, cold)
collection = PatchCollection(patches, cmap=cmapName, alpha=1.0)
- allColors = numpy.array(list(materialMap.values()))
- ourColors = numpy.array([materialMap[materialName] for materialName in data])
+ allColors = np.array(list(materialMap.values()))
+ ourColors = np.array([materialMap[materialName] for materialName in data])
collection.set_array(ourColors)
ax.add_collection(collection)
@@ -1427,7 +1423,7 @@ def plotBlockDiagram(
"",
"{}".format(materialName),
)
- for materialName in numpy.unique(data)
+ for materialName in np.unique(data)
]
legend = _createLegend(legendMap, collection, size=50, shape=Rectangle)
pltKwargs = {
diff --git a/armi/utils/properties.py b/armi/utils/properties.py
index 2532b72e3..e4e5f5f69 100644
--- a/armi/utils/properties.py
+++ b/armi/utils/properties.py
@@ -13,14 +13,14 @@
# limitations under the License.
"""This module contains methods for adding properties with custom behaviors to classes."""
-import numpy
+import numpy as np
def areEqual(val1, val2, relativeTolerance=0.0):
hackEqual = numpyHackForEqual(val1, val2)
if hackEqual or not relativeTolerance: # takes care of dictionaries and strings.
return hackEqual
- return numpy.allclose(
+ return np.allclose(
val1, val2, rtol=relativeTolerance, atol=0.0
) # does not work for dictionaries or strings
@@ -28,10 +28,13 @@ def areEqual(val1, val2, relativeTolerance=0.0):
def numpyHackForEqual(val1, val2):
"""Checks lots of types for equality like strings and dicts."""
# when doing this with numpy arrays you get an array of booleans which causes the value error
- notEqual = val1 != val2
+ if isinstance(val1, np.ndarray) and isinstance(val2, np.ndarray):
+ if val1.size != val2.size:
+ return False
+ notEqual = val1 != val2
try: # should work for everything but numpy arrays
- if isinstance(notEqual, numpy.ndarray) and notEqual.size == 0:
+ if isinstance(notEqual, np.ndarray) and notEqual.size == 0:
return True
return not notEqual.__bool__()
except (AttributeError, ValueError): # from comparing 2 numpy arrays
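
The new size guard matters because elementwise comparison of differently-sized numpy arrays does not reduce to a single boolean. With the guard, such pairs short-circuit to "not equal" without ever evaluating ``val1 != val2``::

    import numpy as np
    from armi.utils.properties import numpyHackForEqual

    a = np.array([1.0, 2.0])
    b = np.array([1.0, 2.0, 3.0])
    print(numpyHackForEqual(a, b))  # False, no ambiguous array comparison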
diff --git a/armi/utils/reportPlotting.py b/armi/utils/reportPlotting.py
index e0b5ced3b..28433444d 100644
--- a/armi/utils/reportPlotting.py
+++ b/armi/utils/reportPlotting.py
@@ -29,13 +29,13 @@
import math
import os
-from matplotlib import cm
+from matplotlib import colormaps
from matplotlib import colors as mpltcolors
import matplotlib.path
import matplotlib.projections.polar
import matplotlib.pyplot as plt
import matplotlib.spines
-import numpy
+import numpy as np
from armi import runLog
from armi import settings
@@ -60,15 +60,13 @@ def plotReactorPerformance(reactor, dbi, buGroups, extension=None, history=None)
The burnup groups in the problem
extension : str, optional
- The file extention for saving plots
+ The file extension for saving plots
history: armi.bookkeeping.historyTracker.HistoryTrackerInterface object
The history tracker interface
"""
try:
- data = dbi.getHistory(
- reactor, params=["cycle", "time", "eFeedMT", "eSWU", "eFuelCycleCost"]
- )
+ data = dbi.getHistory(reactor, params=["cycle", "time", "eFeedMT", "eSWU"])
data.update(
dbi.getHistory(
reactor.core,
@@ -122,7 +120,7 @@ def plotReactorPerformance(reactor, dbi, buGroups, extension=None, history=None)
def valueVsTime(name, x, y, key, yaxis, title, ymin=None, extension=None):
- r"""
+ """
Plots a value vs. time with a standard graph format.
Parameters
@@ -143,7 +141,7 @@ def valueVsTime(name, x, y, key, yaxis, title, ymin=None, extension=None):
The minimum y-axis value. If any ordinates are less than this value,
it will be ignored.
extension : str, optional
- The file extention for saving the figure
+ The file extension for saving the figure
"""
extension = extension or settings.Settings()["outputFileExtension"]
@@ -167,7 +165,7 @@ def valueVsTime(name, x, y, key, yaxis, title, ymin=None, extension=None):
def keffVsTime(name, time, keff, keffUnc=None, ymin=None, extension=None):
- r"""
+ """
Plots core keff vs. time.
Parameters
@@ -183,7 +181,7 @@ def keffVsTime(name, time, keff, keffUnc=None, ymin=None, extension=None):
ymin : float, optional
Minimum y-axis value to target.
extension : str, optional
- The file extention for saving the figure
+ The file extension for saving the figure
"""
extension = extension or settings.Settings()["outputFileExtension"]
@@ -228,7 +226,7 @@ def buVsTime(name, scalars, extension=None):
scalars : dict
Scalar values for this case
extension : str, optional
- The file extention for saving the figure
+ The file extension for saving the figure
"""
extension = extension or settings.Settings()["outputFileExtension"]
@@ -280,11 +278,11 @@ def xsHistoryVsTime(name, history, buGroups, extension=None):
buGroups : list of float
The burnup groups in the problem
extension : str, optional
- The file extention for saving the figure
+ The file extension for saving the figure
"""
extension = extension or settings.Settings()["outputFileExtension"]
- if not history.xsHistory:
+ if history is None or not history.xsHistory:
return
colors = itertools.cycle(["b", "g", "r", "c", "m", "y", "k"])
@@ -307,7 +305,7 @@ def xsHistoryVsTime(name, history, buGroups, extension=None):
plt.legend()
plt.title("Block burnups used to generate XS for {0}".format(name))
plt.xlabel("Time (years)")
- plt.ylabel("Burnup (% FIMA)")
+ plt.ylabel(r"Burnup (% FIMA)")
plt.ylim(0, maxbu * 1.05)
figName = name + ".bugroups." + extension
@@ -317,8 +315,8 @@ def xsHistoryVsTime(name, history, buGroups, extension=None):
def movesVsCycle(name, scalars, extension=None):
- r"""
- make a bar chart showing the number of moves per cycle in the full core.
+ """
+ Make a bar chart showing the number of moves per cycle in the full core.
A move is defined as an assembly being picked up, moved, and put down. So if
two assemblies are swapped, that is 2 moves. Note that it does not count
@@ -331,7 +329,7 @@ def movesVsCycle(name, scalars, extension=None):
name : str
reactor.name
extension : str, optional
- The file extention for saving the figure
+ The file extension for saving the figure
See Also
--------
@@ -398,7 +396,7 @@ def plotCoreOverviewRadar(reactors, reactorNames=None):
]
)
)
- physicsVals = numpy.array(physicsVals)
+ physicsVals = np.array(physicsVals)
theta = thetas.get(physicsName)
if theta is None:
# first time through. Build the radar, store the axis
@@ -419,11 +417,11 @@ def plotCoreOverviewRadar(reactors, reactorNames=None):
plt.rgrids([0.2, 0.4, 0.6, 0.8]) # radial grid lines
else:
ax = axes[physicsName]
- with numpy.errstate(divide="ignore", invalid="ignore"):
+ with np.errstate(divide="ignore", invalid="ignore"):
vals = (
physicsVals / firstReactorVals[physicsName]
) # normalize to first reactor b/c values differ by a lot.
- vals[numpy.isnan(vals)] = 0.2
+ vals[np.isnan(vals)] = 0.2
ax.plot(theta, vals, color=color)
ax.fill(theta, vals, facecolor=color, alpha=0.25)
@@ -562,9 +560,9 @@ def _radarFactory(numVars, frame="circle"):
# calculate evenly-spaced axis angles
# rotate theta such that the first axis is at the top
# keep within 0 to 2pi range though.
- theta = (
- numpy.linspace(0, 2 * numpy.pi, numVars, endpoint=False) + numpy.pi / 2
- ) % (2.0 * numpy.pi)
+ theta = (np.linspace(0, 2 * np.pi, numVars, endpoint=False) + np.pi / 2) % (
+ 2.0 * np.pi
+ )
def drawPolyPatch():
verts = _unitPolyVerts(theta)
@@ -578,8 +576,8 @@ def close_line(line):
"""Closes the input line."""
x, y = line.get_data()
if x[0] != x[-1]:
- x = numpy.concatenate((x, [x[0]]))
- y = numpy.concatenate((y, [y[0]]))
+ x = np.concatenate((x, [x[0]]))
+ y = np.concatenate((y, [y[0]]))
line.set_data(x, y)
patchDict = {"polygon": drawPolyPatch, "circle": drawCirclePatch}
@@ -611,7 +609,7 @@ def plot(self, *args, **kwargs):
close_line(line)
def set_var_labels(self, labels):
- self.set_thetagrids(numpy.degrees(theta), labels)
+ self.set_thetagrids(np.degrees(theta), labels)
def _gen_axes_patch(self):
return self.draw_patch()
@@ -643,7 +641,7 @@ def _unitPolyVerts(theta):
This polygon is circumscribed by a unit circle centered at (0.5, 0.5)
"""
x0 = y0 = r = 0.5
- verts = list(zip(r * numpy.cos(theta) + x0, r * numpy.sin(theta) + y0))
+ verts = list(zip(r * np.cos(theta) + x0, r * np.sin(theta) + y0))
return verts
@@ -726,10 +724,10 @@ def plotAxialProfile(zVals, dataVals, fName, metadata, nPlot=1, yLog=False):
ax = plt.gca()
if yLog: # plot the axial profiles on a log scale
- dataVals = numpy.log10(abs(dataVals))
+ dataVals = np.log10(abs(dataVals))
if nPlot > 1:
- colormap = cm.get_cmap("jet")
+ colormap = colormaps["jet"]
norm = mpltcolors.Normalize(0, nPlot - 1)
# alternate between line styles to help distinguish neighboring groups (close on the color map)
diff --git a/armi/utils/tabulate.py b/armi/utils/tabulate.py
new file mode 100644
index 000000000..2f78c3ac8
--- /dev/null
+++ b/armi/utils/tabulate.py
@@ -0,0 +1,1646 @@
+# Copyright 2024 TerraPower, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+r"""Pretty-print tabular data.
+
+This file started out as the MIT-licensed "tabulate". Though we have made, and will continue to
+make, many arbitrary changes as we need. Thanks to the tabulate team.
+
+https://github.com/astanin/python-tabulate
+
+Usage
+-----
+The module provides just one function, `tabulate`, which takes a list of lists or other tabular data
+type as the first argument, and outputs a nicely-formatted plain-text table::
+
+ >>> from armi.utils.tabulate import tabulate
+
+ >>> table = [["Sun",696000,1989100000],["Earth",6371,5973.6],
+ ... ["Moon",1737,73.5],["Mars",3390,641.85]]
+
+ >>> print(tabulate(table))
+ -----  ------  -------------
+ Sun    696000     1.9891e+09
+ Earth    6371  5973.6
+ Moon     1737    73.5
+ Mars     3390   641.85
+ -----  ------  -------------
+
+The following tabular data types are supported:
+
+- list of lists or another iterable of iterables
+- list or another iterable of dicts (keys as columns)
+- dict of iterables (keys as columns)
+- list of dataclasses (field names as columns)
+- two-dimensional NumPy array
+- NumPy record arrays (names as columns)
+
+Table headers
+-------------
+To print nice column headers, supply the second argument (`headers`):
+
+ - `headers` can be an explicit list of column headers
+ - if `headers="firstrow"`, then the first row of data is used
+ - if `headers="keys"`, then dictionary keys or column indices are used
+
+Otherwise a headerless table is produced.
+
+If the number of headers is less than the number of columns, they are interpreted as names of
+the last columns. This is consistent with the plain-text format of R::
+
+ >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
+ ... headers="firstrow"))
+        sex      age
+ -----  -----  -----
+ Alice  F         24
+ Bob    M         19
+
+Column and Headers alignment
+----------------------------
+`tabulate` tries to detect column types automatically, and aligns the values properly. By
+default it aligns decimal points of the numbers (or flushes integer numbers to the right), and
+flushes everything else to the left. Possible column alignments (`numAlign`, `strAlign`) are:
+"right", "center", "left", "decimal" (only for `numAlign`), and None (to disable alignment).
+
+`colGlobalAlign` allows for global alignment of columns, before any specific override from
+ `colAlign`. Possible values are: None (defaults according to coltype), "right", "center",
+ "decimal", "left".
+`colAlign` allows for column-wise override starting from left-most column. Possible values are:
+ "global" (no override), "right", "center", "decimal", "left".
+`headersGlobalAlign` allows for global headers alignment, before any specific override from
+ `headersAlign`. Possible values are: None (follow columns alignment), "right", "center",
+ "left".
+`headersAlign` allows for header-wise override starting from left-most given header. Possible
+ values are: "global" (no override), "same" (follow column alignment), "right", "center",
+ "left".
+
+Note on intended behaviour: If there is no `data`, any column alignment argument is ignored. Hence,
+in this case, header alignment cannot be inferred from column alignment.
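+
+For example, right-aligning the left-most column while leaving the other at its default
+(a minimal illustration; output shown for the default "simple" format)::
+
+ >>> print(tabulate([["a", 1], ["bbb", 22]], colAlign=("right",)))
+ ---  --
+   a   1
+ bbb  22
+ ---  --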
+
+Table formats
+-------------
+`intFmt` is a format specification used for columns which contain numeric data without a decimal
+point. This can also be a list or tuple of format strings, one per column.
+
+`floatFmt` is a format specification used for columns which contain numeric data with a decimal
+point. This can also be a list or tuple of format strings, one per column.
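+
+For instance (a minimal illustration)::
+
+ >>> print(tabulate([[3.14159]], floatFmt=".2f", tableFmt="plain"))
+ 3.14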
+
+`None` values are replaced with a `missingVal` string (like `floatFmt`, this can also be a list
+of values for different columns)::
+
+ >>> print(tabulate([["spam", 1, None],
+ ... ["eggs", 42, 3.14],
+ ... ["other", None, 2.7]], missingVal="?"))
+ -----  --  ----
+ spam    1  ?
+ eggs   42  3.14
+ other   ?  2.7
+ -----  --  ----
+
+Various plain-text table formats (`tableFmt`) are supported: 'armi', 'simple', 'plain', 'grid',
+'github', 'pretty', 'psql', 'rst', and 'tsv'. Variable `tabulateFormats` contains the list of
+currently supported formats.
+
+"plain" format doesn't use any pseudographics to draw tables, it separates columns with a double
+space::
+
+ >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
+ ... ["strings", "numbers"], "plain"))
+ strings      numbers
+ spam         41.9999
+ eggs        451
+
+ >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tableFmt="plain"))
+ spam   41.9999
+ eggs  451
+
+"simple" format is like Pandoc simple_tables::
+
+ >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
+ ... ["strings", "numbers"], "simple"))
+ strings      numbers
+ ---------  ---------
+ spam         41.9999
+ eggs        451
+
+ >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tableFmt="simple"))
+ ----  --------
+ spam   41.9999
+ eggs  451
+ ----  --------
+
+"grid" is similar to tables produced by Emacs table.el package or Pandoc grid_tables::
+
+ >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
+ ... ["strings", "numbers"], "grid"))
+ +-----------+-----------+
+ | strings   |   numbers |
+ +===========+===========+
+ | spam      |   41.9999 |
+ +-----------+-----------+
+ | eggs      |  451      |
+ +-----------+-----------+
+
+ >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tableFmt="grid"))
+ +------+----------+
+ | spam |  41.9999 |
+ +------+----------+
+ | eggs | 451      |
+ +------+----------+
+
+"rst" is like a simple table format from reStructuredText; please note that reStructuredText
+accepts also "grid" tables::
+
+ >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
+ ... ["strings", "numbers"], "rst"))
+ =========  =========
+ strings      numbers
+ =========  =========
+ spam         41.9999
+ eggs        451
+ =========  =========
+
+ >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tableFmt="rst"))
+ ====  ========
+ spam   41.9999
+ eggs  451
+ ====  ========
+
+Number parsing
+--------------
+By default, anything which can be parsed as a number is a number. This ensures numbers represented
+as strings are aligned properly. This can lead to weird results for particular strings, such as
+specific git SHAs: e.g. "42992e1" will be parsed into the number 429920 and aligned as such.
+
+To completely disable number parsing (and alignment), use `disableNumParse=True`. For more
+fine-grained control, a list of column indices can be used to disable number parsing only on those
+columns, e.g. `disableNumParse=[0, 2]` would disable number parsing only on the first and third
+columns.
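+
+For instance, revisiting the SHA-like string above (a minimal illustration)::
+
+ >>> print(tabulate([["42992e1"]], tableFmt="plain"))
+ 429920
+
+ >>> print(tabulate([["42992e1"]], tableFmt="plain", disableNumParse=True))
+ 42992e1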
+
+Column Widths and Auto Line Wrapping
+------------------------------------
+Tabulate will, by default, set the width of each column to the length of the longest element in that
+column. However, when fields are too long to look good on a single line, tabulate can help
+automate word wrapping for you. Use the parameter `maxColWidths` to provide a list of maximal
+column widths::
+
+ >>> print(tabulate( \
+ [('1', 'John Smith', \
+ 'This is a rather long description that might look better if it is wrapped a bit')], \
+ headers=("Issue Id", "Author", "Description"), \
+ maxColWidths=[None, None, 30], \
+ tableFmt="grid" \
+ ))
+ +------------+------------+-------------------------------+
+ |   Issue Id | Author     | Description                   |
+ +============+============+===============================+
+ |          1 | John Smith | This is a rather long         |
+ |            |            | description that might look   |
+ |            |            | better if it is wrapped a bit |
+ +------------+------------+-------------------------------+
+
+Header column width can be specified in a similar way using `maxHeaderColWidths`.
+"""
+from collections import namedtuple
+from collections.abc import Iterable, Sized
+from functools import reduce, partial
+from itertools import chain, zip_longest
+from textwrap import TextWrapper
+import dataclasses
+import math
+import re
+
+from armi import runLog
+
+__all__ = ["tabulate", "tabulateFormats"]
+
+
+# minimum extra space in headers
+MIN_PADDING = 2
+
+# Whether or not to preserve leading/trailing whitespace in data.
+PRESERVE_WHITESPACE = False
+
+_DEFAULT_FLOAT_FMT = "g"
+_DEFAULT_INT_FMT = ""
+_DEFAULT_MISSING_VAL = ""
+# default align will be overwritten by "left", "center" or "decimal" depending on the formatter
+_DEFAULT_ALIGN = "default"
+
+# Constant that can be used as part of passed rows to generate a separating line. It is purposely an
+# unprintable character, very unlikely to be used in a table
+SEPARATING_LINE = "\001"
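+# For example (illustrative): passing rows like [["spam", 1], SEPARATING_LINE, ["eggs", 2]]
+# to tabulate() will render a horizontal rule between the two data rows.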
+
+Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
+DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
+
+# A table structure is supposed to be:
+#
+# --- lineabove ---------
+# headerrow
+# --- linebelowheader ---
+# datarow
+# --- linebetweenrows ---
+# ... (more datarows) ...
+# --- linebetweenrows ---
+# last datarow
+# --- linebelow ---------
+#
+# TableFormat's line* elements can be
+#
+# - either None, if the element is not used,
+# - or a Line tuple,
+# - or a function: [col_widths], [col_alignments] -> string.
+#
+# TableFormat's *row elements can be
+#
+# - either None, if the element is not used,
+# - or a DataRow tuple,
+# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
+#
+# padding (an integer) is the amount of white space around data values.
+#
+# withHeaderHide:
+#
+# - either None, to display all table elements unconditionally,
+# - or a list of elements not to be displayed if the table has column headers.
+#
+TableFormat = namedtuple(
+ "TableFormat",
+ [
+ "lineabove",
+ "linebelowheader",
+ "linebetweenrows",
+ "linebelow",
+ "headerrow",
+ "datarow",
+ "padding",
+ "withHeaderHide",
+ ],
+)
+
+
+def _isSeparatingLine(row):
+ rowType = type(row)
+ isSl = (rowType is list or rowType is str) and (
+ (len(row) >= 1 and row[0] == SEPARATING_LINE)
+ or (len(row) >= 2 and row[1] == SEPARATING_LINE)
+ )
+ return isSl
+
+
+def _rstEscapeFirstColumn(rows, headers):
+ def escapeEmpty(val):
+ if isinstance(val, (str, bytes)) and not val.strip():
+ return ".."
+ else:
+ return val
+
+ newHeaders = list(headers)
+ newRows = []
+ if headers:
+ newHeaders[0] = escapeEmpty(headers[0])
+ for row in rows:
+ newRow = list(row)
+ if newRow:
+ newRow[0] = escapeEmpty(row[0])
+ newRows.append(newRow)
+ return newRows, newHeaders
+
+
+_tableFormats = {
+ "armi": TableFormat(
+ lineabove=Line("", "-", " ", ""),
+ linebelowheader=Line("", "-", " ", ""),
+ linebetweenrows=None,
+ linebelow=Line("", "-", " ", ""),
+ headerrow=DataRow("", " ", ""),
+ datarow=DataRow("", " ", ""),
+ padding=0,
+ withHeaderHide=None,
+ ),
+ "simple": TableFormat(
+ lineabove=Line("", "-", " ", ""),
+ linebelowheader=Line("", "-", " ", ""),
+ linebetweenrows=None,
+ linebelow=Line("", "-", " ", ""),
+ headerrow=DataRow("", " ", ""),
+ datarow=DataRow("", " ", ""),
+ padding=0,
+ withHeaderHide=["lineabove", "linebelow"],
+ ),
+ "plain": TableFormat(
+ lineabove=None,
+ linebelowheader=None,
+ linebetweenrows=None,
+ linebelow=None,
+ headerrow=DataRow("", "  ", ""),
+ datarow=DataRow("", "  ", ""),
+ padding=0,
+ withHeaderHide=None,
+ ),
+ "grid": TableFormat(
+ lineabove=Line("+", "-", "+", "+"),
+ linebelowheader=Line("+", "=", "+", "+"),
+ linebetweenrows=Line("+", "-", "+", "+"),
+ linebelow=Line("+", "-", "+", "+"),
+ headerrow=DataRow("|", "|", "|"),
+ datarow=DataRow("|", "|", "|"),
+ padding=1,
+ withHeaderHide=None,
+ ),
+ "github": TableFormat(
+ lineabove=Line("|", "-", "|", "|"),
+ linebelowheader=Line("|", "-", "|", "|"),
+ linebetweenrows=None,
+ linebelow=None,
+ headerrow=DataRow("|", "|", "|"),
+ datarow=DataRow("|", "|", "|"),
+ padding=1,
+ withHeaderHide=["lineabove"],
+ ),
+ "pretty": TableFormat(
+ lineabove=Line("+", "-", "+", "+"),
+ linebelowheader=Line("+", "-", "+", "+"),
+ linebetweenrows=None,
+ linebelow=Line("+", "-", "+", "+"),
+ headerrow=DataRow("|", "|", "|"),
+ datarow=DataRow("|", "|", "|"),
+ padding=1,
+ withHeaderHide=None,
+ ),
+ "psql": TableFormat(
+ lineabove=Line("+", "-", "+", "+"),
+ linebelowheader=Line("|", "-", "+", "|"),
+ linebetweenrows=None,
+ linebelow=Line("+", "-", "+", "+"),
+ headerrow=DataRow("|", "|", "|"),
+ datarow=DataRow("|", "|", "|"),
+ padding=1,
+ withHeaderHide=None,
+ ),
+ "rst": TableFormat(
+ lineabove=Line("", "=", " ", ""),
+ linebelowheader=Line("", "=", " ", ""),
+ linebetweenrows=None,
+ linebelow=Line("", "=", " ", ""),
+ headerrow=DataRow("", " ", ""),
+ datarow=DataRow("", " ", ""),
+ padding=0,
+ withHeaderHide=None,
+ ),
+ "tsv": TableFormat(
+ lineabove=None,
+ linebelowheader=None,
+ linebetweenrows=None,
+ linebelow=None,
+ headerrow=DataRow("", "\t", ""),
+ datarow=DataRow("", "\t", ""),
+ padding=0,
+ withHeaderHide=None,
+ ),
+}
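+
+# Note that a TableFormat instance may also be passed directly as `tableFmt` to tabulate(). For
+# example, a hypothetical dotted-rule variant of "simple" could be built as
+# TableFormat(lineabove=Line("", ".", "  ", ""), linebelowheader=Line("", ".", "  ", ""),
+# linebetweenrows=None, linebelow=Line("", ".", "  ", ""), headerrow=DataRow("", "  ", ""),
+# datarow=DataRow("", "  ", ""), padding=0, withHeaderHide=None).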
+
+
+tabulateFormats = list(sorted(_tableFormats.keys()))
+
+# The table formats for which multiline cells will be folded into subsequent table rows. The key is
+# the original format, the value is the format that will be used to represent it.
+multilineFormats = {
+ "armi": "armi",
+ "plain": "plain",
+ "simple": "simple",
+ "grid": "grid",
+ "pretty": "pretty",
+ "psql": "psql",
+ "rst": "rst",
+}
+
+_multilineCodes = re.compile(r"\r|\n|\r\n")
+_multilineCodesBytes = re.compile(b"\r|\n|\r\n")
+
+# Handle ANSI escape sequences for both control sequence introducer (CSI) and operating system
+# command (OSC). Both of these begin with 0x1b (or octal 033), which will be shown below as ESC.
+#
+# CSI ANSI escape codes have the following format, defined in section 5.4 of ECMA-48:
+#
+# CSI: ESC followed by the '[' character (0x5b)
+# Parameter Bytes: 0..n bytes in the range 0x30-0x3f
+# Intermediate Bytes: 0..n bytes in the range 0x20-0x2f
+# Final Byte: a single byte in the range 0x40-0x7e
+#
+# Also include the terminal hyperlink sequences as described here:
+# https://gist.github.com/egmontkob/eb114294efbcd5adb1944c9f3cb5feda
+#
+# OSC 8 ; params ; uri ST display_text OSC 8 ;; ST
+#
+# Example: \x1b]8;;https://example.com\x5ctext to show\x1b]8;;\x5c
+#
+# Where:
+# OSC: ESC followed by the ']' character (0x5d)
+# params: 0..n optional key value pairs separated by ':' (e.g. foo=bar:baz=qux:abc=123)
+# URI: the actual URI with protocol scheme (e.g. https://, file://, ftp://)
+# ST: ESC followed by the '\' character (0x5c)
+_esc = r"\x1b"
+_csi = rf"{_esc}\["
+_osc = rf"{_esc}\]"
+_st = rf"{_esc}\\"
+
+_ansiEscapePat = rf"""
+ (
+ # terminal colors, etc
+ {_csi} # CSI
+ [\x30-\x3f]* # parameter bytes
+ [\x20-\x2f]* # intermediate bytes
+ [\x40-\x7e] # final byte
+ |
+ # terminal hyperlinks
+ {_osc}8; # OSC opening
+ (\w+=\w+:?)* # key=value params list (submatch 2)
+ ; # delimiter
+ ([^{_esc}]+) # URI - anything but ESC (submatch 3)
+ {_st} # ST
+ ([^{_esc}]+) # link text - anything but ESC (submatch 4)
+ {_osc}8;;{_st} # "closing" OSC sequence
+ )
+"""
+_ansiCodes = re.compile(_ansiEscapePat, re.VERBOSE)
+_ansiCodesBytes = re.compile(_ansiEscapePat.encode("utf8"), re.VERBOSE)
+_floatWithThousandsSeparators = re.compile(
+ r"^(([+-]?[0-9]{1,3})(?:,([0-9]{3}))*)?(?(1)\.[0-9]*|\.[0-9]+)?$"
+)
+
+
+def _isnumberWithThousandsSeparator(string):
+ """Function to test of a string is a number with a thousands separator.
+
+ >>> _isnumberWithThousandsSeparator(".")
+ False
+ >>> _isnumberWithThousandsSeparator("1")
+ True
+ >>> _isnumberWithThousandsSeparator("1.")
+ True
+ >>> _isnumberWithThousandsSeparator(".1")
+ True
+ >>> _isnumberWithThousandsSeparator("1000")
+ False
+ >>> _isnumberWithThousandsSeparator("1,000")
+ True
+ >>> _isnumberWithThousandsSeparator("1,0000")
+ False
+ >>> _isnumberWithThousandsSeparator(b"1,000.1234")
+ True
+ >>> _isnumberWithThousandsSeparator("+1,000.1234")
+ True
+ >>> _isnumberWithThousandsSeparator("-1,000.1234")
+ True
+ """
+ try:
+ string = string.decode()
+ except (UnicodeDecodeError, AttributeError):
+ pass
+
+ return bool(re.match(_floatWithThousandsSeparators, string))
+
+
+def _isconvertible(conv, string):
+ try:
+ conv(string)
+ return True
+ except (ValueError, TypeError):
+ return False
+
+
+def _isnumber(string):
+ """Helper function; is this string a number.
+
+ >>> _isnumber("123.45")
+ True
+ >>> _isnumber("123")
+ True
+ >>> _isnumber("spam")
+ False
+ >>> _isnumber("123e45678")
+ False
+ >>> _isnumber("inf")
+ True
+ """
+ if not _isconvertible(float, string):
+ return False
+ elif isinstance(string, (str, bytes)) and (
+ math.isinf(float(string)) or math.isnan(float(string))
+ ):
+ return string.lower() in ["inf", "-inf", "nan"]
+ return True
+
+
+def _isint(string, inttype=int):
+ """Determine if a string is an integer.
+
+ >>> _isint("123")
+ True
+ >>> _isint("123.45")
+ False
+ """
+ return (
+ type(string) is inttype
+ or (
+ (hasattr(string, "is_integer") or hasattr(string, "__array__"))
+ and str(type(string)).startswith("<class 'numpy.int")
+ ) # numpy.int64 and similar
+ or (isinstance(string, (bytes, str)) and _isconvertible(inttype, string))
+ )
+
+
+def _isbool(string):
+ """Is this string a boolean.
+
+ >>> _isbool(True)
+ True
+ >>> _isbool("False")
+ True
+ >>> _isbool(1)
+ False
+ """
+ return type(string) is bool or (
+ isinstance(string, (bytes, str)) and string in ("True", "False")
+ )
+
+
+def _type(string, hasInvisible=True, numparse=True):
+ r"""The least generic type (type(None), int, float, str, unicode).
+
+ >>> _type(None) is type(None)
+ True
+ >>> _type("foo") is type("")
+ True
+ >>> _type("1") is type(1)
+ True
+ >>> _type('\x1b[31m42\x1b[0m') is type(42)
+ True
+ >>> _type('\x1b[31m42\x1b[0m') is type(42)
+ True
+
+ """
+ if hasInvisible and isinstance(string, (str, bytes)):
+ string = _stripAnsi(string)
+
+ if string is None:
+ return type(None)
+ elif hasattr(string, "isoformat"):
+ # datetime.datetime, date, and time
+ return str
+ elif _isbool(string):
+ return bool
+ elif _isint(string) and numparse:
+ return int
+ elif _isnumber(string) and numparse:
+ return float
+ elif isinstance(string, bytes):
+ return bytes
+ else:
+ return str
+
+
+def _afterpoint(string):
+ """Symbols after a decimal point, -1 if the string lacks the decimal point.
+
+ >>> _afterpoint("123.45")
+ 2
+ >>> _afterpoint("1001")
+ -1
+ >>> _afterpoint("eggs")
+ -1
+ >>> _afterpoint("123e45")
+ 2
+ >>> _afterpoint("123,456.78")
+ 2
+
+ """
+ if _isnumber(string) or _isnumberWithThousandsSeparator(string):
+ if _isint(string):
+ return -1
+ else:
+ pos = string.rfind(".")
+ pos = string.lower().rfind("e") if pos < 0 else pos
+ if pos >= 0:
+ return len(string) - pos - 1
+ else:
+ # no point
+ return -1
+ else:
+ # not a number
+ return -1
+
+
+def _padleft(width, s):
+ r"""Flush right.
+
+ >>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
+ True
+
+ """
+ fmt = "{0:>%ds}" % width
+ return fmt.format(s)
+
+
+def _padright(width, s):
+ r"""Flush left.
+
+ >>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
+ True
+
+ """
+ fmt = "{0:<%ds}" % width
+ return fmt.format(s)
+
+
+def _padboth(width, s):
+ r"""Center string.
+
+ >>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
+ True
+
+ """
+ fmt = "{0:^%ds}" % width
+ return fmt.format(s)
+
+
+def _padnone(ignoreWidth, s):
+ return s
+
+
+def _stripAnsi(s):
+ r"""Remove ANSI escape sequences, both CSI and OSC hyperlinks.
+
+ CSI sequences are simply removed from the output, while OSC hyperlinks are replaced with the
+ link text. Note: it may be desirable to show the URI instead but this is not supported.
+
+ >>> repr(_stripAnsi('\x1B]8;;https://example.com\x1B\\This is a link\x1B]8;;\x1B\\'))
+ "'This is a link'"
+
+ >>> repr(_stripAnsi('\x1b[31mred\x1b[0m text'))
+ "'red text'"
+
+ """
+ if isinstance(s, str):
+ return _ansiCodes.sub(r"\4", s)
+ else: # a bytestring
+ return _ansiCodesBytes.sub(r"\4", s)
+
+
+def _visibleWidth(s):
+ r"""Visible width of a printed string.
+
+ >>> _visibleWidth('\x1b[31mhello\x1b[0m'), _visibleWidth("world")
+ (5, 5)
+
+ """
+ if isinstance(s, (str, bytes)):
+ return len(_stripAnsi(s))
+ else:
+ return len(str(s))
+
+
+def _isMultiline(s):
+ if isinstance(s, str):
+ return bool(re.search(_multilineCodes, s))
+ else:
+ # a bytestring
+ return bool(re.search(_multilineCodesBytes, s))
+
+
+def _multilineWidth(multilineS, lineWidthFn=len):
+ """Visible width of a potentially multiline content."""
+ return max(map(lineWidthFn, re.split("[\r\n]", multilineS)))
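+# e.g. (illustrative): _multilineWidth("ab\ncdef") == 4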
+
+
+def _chooseWidthFn(hasInvisible, isMultiline):
+ """Return a function to calculate visible cell width."""
+ if hasInvisible:
+ lineWidthFn = _visibleWidth
+ else:
+ lineWidthFn = len
+
+ if isMultiline:
+ widthFn = lambda s: _multilineWidth(s, lineWidthFn)
+ else:
+ widthFn = lineWidthFn
+
+ return widthFn
+
+
+def _alignColumnChoosePadfn(strings, alignment, hasInvisible):
+ if alignment == "right":
+ if not PRESERVE_WHITESPACE:
+ strings = [s.strip() for s in strings]
+ padfn = _padleft
+ elif alignment == "center":
+ if not PRESERVE_WHITESPACE:
+ strings = [s.strip() for s in strings]
+ padfn = _padboth
+ elif alignment == "decimal":
+ if hasInvisible:
+ decimals = [_afterpoint(_stripAnsi(s)) for s in strings]
+ else:
+ decimals = [_afterpoint(s) for s in strings]
+ maxdecimals = max(decimals)
+ strings = [s + (maxdecimals - decs) * " " for s, decs in zip(strings, decimals)]
+ padfn = _padleft
+ elif not alignment:
+ padfn = _padnone
+ else:
+ if not PRESERVE_WHITESPACE:
+ strings = [s.strip() for s in strings]
+ padfn = _padright
+ return strings, padfn
+
+
+def _alignColumnChooseWidthFn(hasInvisible, isMultiline):
+ if hasInvisible:
+ lineWidthFn = _visibleWidth
+ else:
+ lineWidthFn = len
+
+ if isMultiline:
+ widthFn = lambda s: _alignColumnMultilineWidth(s, lineWidthFn)
+ else:
+ widthFn = lineWidthFn
+
+ return widthFn
+
+
+def _alignColumnMultilineWidth(multilineS, lineWidthFn=len):
+ """Visible width of a potentially multiline content."""
+ return list(map(lineWidthFn, re.split("[\r\n]", multilineS)))
+
+
+def _flatList(nestedList):
+ ret = []
+ for item in nestedList:
+ if isinstance(item, list):
+ for subitem in item:
+ ret.append(subitem)
+ else:
+ ret.append(item)
+ return ret
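+# e.g. (illustrative): _flatList([1, [2, 3], 4]) == [1, 2, 3, 4]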
+
+
+def _alignColumn(strings, alignment, minwidth=0, hasInvisible=True, isMultiline=False):
+ """[string] -> [padded_string]."""
+ strings, padfn = _alignColumnChoosePadfn(strings, alignment, hasInvisible)
+ widthFn = _alignColumnChooseWidthFn(hasInvisible, isMultiline)
+
+ sWidths = list(map(widthFn, strings))
+ maxwidth = max(max(_flatList(sWidths)), minwidth)
+ if isMultiline:
+ if not hasInvisible:
+ paddedStrings = [
+ "\n".join([padfn(maxwidth, s) for s in ms.splitlines()])
+ for ms in strings
+ ]
+ else:
+ # enable wide-character width corrections
+ sLens = [[len(s) for s in re.split("[\r\n]", ms)] for ms in strings]
+ visibleWidths = [
+ [maxwidth - (w - ll) for w, ll in zip(mw, ml)]
+ for mw, ml in zip(sWidths, sLens)
+ ]
+ # wcswidth and _visibleWidth don't count invisible characters;
+ # padfn doesn't need to apply another correction
+ paddedStrings = [
+ "\n".join([padfn(w, s) for s, w in zip((ms.splitlines() or ms), mw)])
+ for ms, mw in zip(strings, visibleWidths)
+ ]
+ else: # single-line cell values
+ if not hasInvisible:
+ paddedStrings = [padfn(maxwidth, s) for s in strings]
+ else:
+ # enable wide-character width corrections
+ sLens = list(map(len, strings))
+ visibleWidths = [maxwidth - (w - ll) for w, ll in zip(sWidths, sLens)]
+ # wcswidth and _visibleWidth don't count invisible characters;
+ # padfn doesn't need to apply another correction
+ paddedStrings = [padfn(w, s) for s, w in zip(strings, visibleWidths)]
+
+ return paddedStrings
+
+
+def _moreGeneric(type1, type2):
+ types = {
+ type(None): 0,
+ bool: 1,
+ int: 2,
+ float: 3,
+ bytes: 4,
+ str: 5,
+ }
+ invtypes = {
+ 5: str,
+ 4: bytes,
+ 3: float,
+ 2: int,
+ 1: bool,
+ 0: type(None),
+ }
+ moregeneric = max(types.get(type1, 5), types.get(type2, 5))
+ return invtypes[moregeneric]
+
+
+def _columnType(strings, hasInvisible=True, numparse=True):
+ r"""The least generic type all column values are convertible to.
+
+ >>> _columnType([True, False]) is bool
+ True
+ >>> _columnType(["1", "2"]) is int
+ True
+ >>> _columnType(["1", "2.3"]) is float
+ True
+ >>> _columnType(["1", "2.3", "four"]) is str
+ True
+ >>> _columnType(["four", '\u043f\u044f\u0442\u044c']) is str
+ True
+ >>> _columnType([None, "brux"]) is str
+ True
+ >>> _columnType([1, 2, None]) is int
+ True
+ >>> import datetime as dt
+ >>> _columnType([dt.datetime(1991,2,19), dt.time(17,35)]) is str
+ True
+
+ """
+ types = [_type(s, hasInvisible, numparse) for s in strings]
+ return reduce(_moreGeneric, types, bool)
+
+
+def _format(val, valtype, floatFmt, intFmt, missingVal="", hasInvisible=True):
+ r"""Format a value according to its type.
+
+ Unicode is supported::
+
+ >>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
+ tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
+ good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
+ tabulate(tbl, headers=hrow) == good_result
+ True
+
+ """ # noqa
+ if val is None:
+ return missingVal
+
+ if valtype is str:
+ return f"{val}"
+ elif valtype is int:
+ return format(val, intFmt)
+ elif valtype is bytes:
+ try:
+ return str(val, "ascii")
+ except (TypeError, UnicodeDecodeError):
+ return str(val)
+ elif valtype is float:
+ isAColoredNumber = hasInvisible and isinstance(val, (str, bytes))
+ if isAColoredNumber:
+ rawVal = _stripAnsi(val)
+ formattedVal = format(float(rawVal), floatFmt)
+ return val.replace(rawVal, formattedVal)
+ else:
+ return format(float(val), floatFmt)
+ else:
+ return f"{val}"
+
+
+def _alignHeader(
+ header, alignment, width, visibleWidth, isMultiline=False, widthFn=None
+):
+ """Pad string header to width chars given known visibleWidth of the header."""
+ if isMultiline:
+ headerLines = re.split(_multilineCodes, header)
+ paddedLines = [
+ _alignHeader(h, alignment, width, widthFn(h)) for h in headerLines
+ ]
+ return "\n".join(paddedLines)
+ # else: not multiline
+ ninvisible = len(header) - visibleWidth
+ width += ninvisible
+ if alignment == "left":
+ return _padright(width, header)
+ elif alignment == "center":
+ return _padboth(width, header)
+ elif not alignment:
+ return f"{header}"
+ else:
+ return _padleft(width, header)
+
+
+def _removeSeparatingLines(rows):
+ if type(rows) is list:
+ separatingLines = []
+ sansRows = []
+ for index, row in enumerate(rows):
+ if _isSeparatingLine(row):
+ separatingLines.append(index)
+ else:
+ sansRows.append(row)
+ return sansRows, separatingLines
+ else:
+ return rows, None
+
+
+def _reinsertSeparatingLines(rows, separatingLines):
+ if separatingLines:
+ for index in separatingLines:
+ rows.insert(index, SEPARATING_LINE)
+
+
+def _prependRowIndex(rows, index):
+ """Add a left-most index column."""
+ if index is None or index is False:
+ return rows
+ if isinstance(index, Sized) and len(index) != len(rows):
+ raise ValueError(
+ "index must be as long as the number of data rows: "
+ + "len(index)={} len(rows)={}".format(len(index), len(rows))
+ )
+ sansRows, separatingLines = _removeSeparatingLines(rows)
+ newRows = []
+ indexIter = iter(index)
+ for row in sansRows:
+ indexV = next(indexIter)
+ newRows.append([indexV] + list(row))
+ rows = newRows
+ _reinsertSeparatingLines(rows, separatingLines)
+ return rows
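+# e.g. (illustrative): _prependRowIndex([[10], [20]], ["a", "b"]) == [["a", 10], ["b", 20]]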
+
+
+def _bool(val):
+ """A wrapper around standard bool() which doesn't throw on NumPy arrays."""
+ try:
+ return bool(val)
+ except ValueError:
+ # val is likely to be a numpy array with many elements
+ return False
+
+
+def _normalizeTabularData(data, headers, showIndex="default"):
+ """Transform a supported data type to a list of lists & a list of headers, with header padding.
+
+ Supported tabular data types:
+
+ * list-of-lists or another iterable of iterables
+ * list of named tuples (usually used with headers="keys")
+ * list of dicts (usually used with headers="keys")
+ * list of OrderedDicts (usually used with headers="keys")
+ * list of dataclasses (Python 3.7+ only, usually used with headers="keys")
+ * 2D NumPy arrays
+ * NumPy record arrays (usually used with headers="keys")
+ * dict of iterables (usually used with headers="keys")
+
+ The first row can be used as headers if headers="firstrow", column indices can be used as
+ headers if headers="keys".
+
+ If showIndex="always", show row indices for all types of data.
+ If showIndex="never", don't show row indices for all types of data.
+ If showIndex is an iterable, show its values as row indices.
+ """
+ try:
+ bool(headers)
+ except ValueError:
+ # numpy.ndarray, ...
+ headers = list(headers)
+
+ index = None
+ if hasattr(data, "keys"):
+ # dict-like
+ keys = data.keys()
+
+ # fill out default values, to ensure all data lists are the same length
+ vals = list(data.values())
+ maxLen = max([len(v) for v in vals], default=0)
+ vals = [[v for v in vv] + [None] * (maxLen - len(vv)) for vv in vals]
+ rows = [tuple(v[i] for v in vals) for i in range(maxLen)]
+
+ if headers == "keys":
+ # headers should be strings
+ headers = list(map(str, keys))
+ else:
+ # it's a usual iterable of iterables, or a NumPy array, or an iterable of dataclasses
+ rows = list(data)
+
+ if headers == "keys" and not rows:
+ # an empty table
+ headers = []
+ elif (
+ headers == "keys"
+ and hasattr(data, "dtype")
+ and getattr(data.dtype, "names")
+ ):
+ # numpy record array
+ headers = data.dtype.names
+ elif (
+ headers == "keys"
+ and len(rows) > 0
+ and isinstance(rows[0], tuple)
+ and hasattr(rows[0], "_fields")
+ ):
+ # namedtuple
+ headers = list(map(str, rows[0]._fields))
+ elif len(rows) > 0 and hasattr(rows[0], "keys") and hasattr(rows[0], "values"):
+ # dict-like object
+ uniqKeys = set() # implements hashed lookup
+ keys = [] # storage for set
+ if headers == "firstrow":
+ firstdict = rows[0] if len(rows) > 0 else {}
+ keys.extend(firstdict.keys())
+ uniqKeys.update(keys)
+ rows = rows[1:]
+ for row in rows:
+ for k in row.keys():
+ # Save unique items in input order
+ if k not in uniqKeys:
+ keys.append(k)
+ uniqKeys.add(k)
+ if headers == "keys":
+ headers = keys
+ elif isinstance(headers, dict):
+ # a dict of headers for a list of dicts
+ headers = [headers.get(k, k) for k in keys]
+ headers = list(map(str, headers))
+ elif headers == "firstrow":
+ if len(rows) > 0:
+ headers = [firstdict.get(k, k) for k in keys]
+ headers = list(map(str, headers))
+ else:
+ headers = []
+ elif headers:
+ raise ValueError(
+ "headers for a list of dicts is not a dict or a keyword"
+ )
+ rows = [[row.get(k) for k in keys] for row in rows]
+ elif len(rows) > 0 and dataclasses.is_dataclass(rows[0]):
+ # Python 3.7+'s dataclass
+ fieldNames = [field.name for field in dataclasses.fields(rows[0])]
+ if headers == "keys":
+ headers = fieldNames
+ rows = [[getattr(row, f) for f in fieldNames] for row in rows]
+ elif headers == "keys" and len(rows) > 0:
+ # keys are column indices
+ headers = list(map(str, range(len(rows[0]))))
+
+ # take headers from the first row if necessary
+ if headers == "firstrow" and len(rows) > 0:
+ if index is not None:
+ headers = [index[0]] + list(rows[0])
+ index = index[1:]
+ else:
+ headers = rows[0]
+ headers = list(map(str, headers)) # headers should be strings
+ rows = rows[1:]
+ elif headers == "firstrow":
+ headers = []
+
+ headers = list(map(str, headers))
+ rows = list(map(lambda r: r if _isSeparatingLine(r) else list(r), rows))
+
+ # add or remove an index column
+ showIndexIsSStr = type(showIndex) in [str, bytes]
+ if showIndex == "default" and index is not None:
+ rows = _prependRowIndex(rows, index)
+ elif isinstance(showIndex, Sized) and not showIndexIsSStr:
+ rows = _prependRowIndex(rows, list(showIndex))
+ elif isinstance(showIndex, Iterable) and not showIndexIsSStr:
+ rows = _prependRowIndex(rows, showIndex)
+ elif showIndex == "always" or (_bool(showIndex) and not showIndexIsSStr):
+ if index is None:
+ index = list(range(len(rows)))
+ rows = _prependRowIndex(rows, index)
+
+ # pad with empty headers for initial columns if necessary
+ headersPad = 0
+ if headers and len(rows) > 0:
+ headersPad = max(0, len(rows[0]) - len(headers))
+ headers = [""] * headersPad + headers
+
+ return rows, headers, headersPad
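+# e.g. (illustrative): _normalizeTabularData({"a": [1, 2], "b": [3]}, headers="keys")
+# returns ([[1, 3], [2, None]], ["a", "b"], 0)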
+
+
+def _wrapTextToColWidths(listOfLists, colwidths, numparses=True):
+ if len(listOfLists):
+ numCols = len(listOfLists[0])
+ else:
+ numCols = 0
+
+ numparses = _expandIterable(numparses, numCols, True)
+ result = []
+
+ for row in listOfLists:
+ newRow = []
+ for cell, width, numparse in zip(row, colwidths, numparses):
+ if _isnumber(cell) and numparse:
+ newRow.append(cell)
+ continue
+
+ if width is not None:
+ wrapper = TextWrapper(width=width)
+ # Cast based on our internal type handling. Any future custom formatting of types
+ # (such as datetimes) may need to be more explicit than just `str` of the object
+ castedCell = (
+ str(cell) if _isnumber(cell) else _type(cell, numparse)(cell)
+ )
+ wrapped = [
+ "\n".join(wrapper.wrap(line))
+ for line in castedCell.splitlines()
+ if line.strip() != ""
+ ]
+ newRow.append("\n".join(wrapped))
+ else:
+ newRow.append(cell)
+ result.append(newRow)
+
+ return result
+
+
+def _toStr(s, encoding="utf8", errors="ignore"):
+ """
+ A type-safe wrapper for converting a bytestring to str.
+
+ This is essentially just a wrapper around .decode() intended for use with things like map(), but
+ with some specific behavior:
+
+ 1. if the given parameter is not a bytestring, it is returned unmodified
+ 2. decode() is called for the given parameter and assumes utf8 encoding, but the default error
+ behavior is changed from 'strict' to 'ignore'
+
+ >>> repr(_toStr(b'foo'))
+ "'foo'"
+
+ >>> repr(_toStr('foo'))
+ "'foo'"
+
+ >>> repr(_toStr(42))
+ "'42'"
+
+ """
+ if isinstance(s, bytes):
+ return s.decode(encoding=encoding, errors=errors)
+ return str(s)
+
+
+def tabulate(
+ data,
+ headers=(),
+ tableFmt="simple",
+ floatFmt=_DEFAULT_FLOAT_FMT,
+ intFmt=_DEFAULT_INT_FMT,
+ numAlign=_DEFAULT_ALIGN,
+ strAlign=_DEFAULT_ALIGN,
+ missingVal=_DEFAULT_MISSING_VAL,
+ showIndex="default",
+ disableNumParse=False,
+ colGlobalAlign=None,
+ colAlign=None,
+ maxColWidths=None,
+ headersGlobalAlign=None,
+ headersAlign=None,
+ rowAlign=None,
+ maxHeaderColWidths=None,
+):
+ """Format a fixed width table for pretty printing.
+
+ Parameters
+ ----------
+ data : object
+ The tabular data you want to print. This can be a list-of-lists/iterables, dict-of-lists/
+ iterables, 2D numpy arrays, or list of dataclasses.
+ headers : tuple or str, optional
+ Nice column names. If this is "firstrow", the first row of the data will be used. If it is
+ "keys", then dictionary keys or column indices are used.
+ tableFmt : str, optional
+ There are custom table formats defined in this file, and you can choose between them with
+ this string: "armi", "simple", "plain", "grid", "github", "pretty", "psql", "rst", "tsv".
+ floatFmt : str, optional
+ A format specification used for columns which contain numeric data with a decimal point.
+ This can also be a list or tuple of format strings, one per column.
+ intFmt : str, optional
+ A format specification used for columns which contain numeric data without a decimal point.
+ This can also be a list or tuple of format strings, one per column.
+ numAlign : str, optional
+ Specially align numbers, options: "right", "center", "left", "decimal".
+ strAlign : str, optional
+ Specially align strings, options: "right", "center", "left".
+ missingVal : str, optional
+ `None` values are replaced with a `missingVal` string.
+ showIndex : str or iterable, optional
+ Controls row indices. If "always", show row indices for all types of data. If "never",
+ don't show row indices. If showIndex is an iterable, show its values as row indices.
+ disableNumParse : bool or list, optional
+ To disable number parsing (and alignment), use `disableNumParse=True`. For more fine-grained
+ control, a list of column indices such as `[0, 2]` would disable number parsing only on the
+ first and third columns.
+ colGlobalAlign : str, optional
+ Allows for global alignment of columns, before any specific override from `colAlign`.
+ Possible values are: None, "right", "center", "decimal", "left".
+ colAlign : str, optional
+ Allows for column-wise override starting from left-most column. Possible values are:
+ "global" (no override), "right", "center", "decimal", "left".
+ maxColWidths : list, optional
+ A list of the maximum column widths.
+ headersGlobalAlign : str, optional
+ Allows for global headers alignment, before any specific override from `headersAlign`.
+ Possible values are: None (follow columns alignment), "right", "center", "left".
+ headersAlign : str, optional
+ Allows for header-wise override starting from left-most given header. Possible values are:
+ "global" (no override), "same" (follow column alignment), "right", "center", "left".
+ rowAlign : str, optional
+ How to vertically align cells within multiline rows: "top", "center", or "bottom".
+ maxHeaderColWidths : list, optional
+ A list of maximum column widths for the header row.
+
+ Returns
+ -------
+ str
+ A text representation of the tabular data.
+ """
+ if data is None:
+ data = []
+
+ listOfLists, headers, headersPad = _normalizeTabularData(
+ data, headers, showIndex=showIndex
+ )
+ listOfLists, separatingLines = _removeSeparatingLines(listOfLists)
+
+ if maxColWidths is not None:
+ if len(listOfLists):
+ numCols = len(listOfLists[0])
+ else:
+ numCols = 0
+ if isinstance(maxColWidths, int): # Expand scalar for all columns
+ maxColWidths = _expandIterable(maxColWidths, numCols, maxColWidths)
+ else: # Ignore col width for any 'trailing' columns
+ maxColWidths = _expandIterable(maxColWidths, numCols, None)
+
+ numparses = _expandNumparse(disableNumParse, numCols)
+ listOfLists = _wrapTextToColWidths(
+ listOfLists, maxColWidths, numparses=numparses
+ )
+
+ if maxHeaderColWidths is not None:
+ numCols = len(listOfLists[0])
+ if isinstance(maxHeaderColWidths, int): # Expand scalar for all columns
+ maxHeaderColWidths = _expandIterable(
+ maxHeaderColWidths, numCols, maxHeaderColWidths
+ )
+ else: # Ignore col width for any 'trailing' columns
+ maxHeaderColWidths = _expandIterable(maxHeaderColWidths, numCols, None)
+
+ numparses = _expandNumparse(disableNumParse, numCols)
+ headers = _wrapTextToColWidths(
+ [headers], maxHeaderColWidths, numparses=numparses
+ )[0]
+
+ # empty values in the first column of RST tables should be escaped
+ # "" should be escaped as "\\ " or ".."
+ if tableFmt == "rst":
+ listOfLists, headers = _rstEscapeFirstColumn(listOfLists, headers)
+
+ # Pretty table formatting does not use any extra padding. Numbers are not parsed and are treated
+ # the same as strings for alignment. Check if pretty is the format being used and override the
+ # defaults so it does not impact other formats.
+ minPadding = MIN_PADDING
+ if tableFmt == "pretty":
+ minPadding = 0
+ disableNumParse = True
+ numAlign = "center" if numAlign == _DEFAULT_ALIGN else numAlign
+ strAlign = "center" if strAlign == _DEFAULT_ALIGN else strAlign
+ else:
+ numAlign = "decimal" if numAlign == _DEFAULT_ALIGN else numAlign
+ strAlign = "left" if strAlign == _DEFAULT_ALIGN else strAlign
+
+ # optimization: look for ANSI control codes once, enable smart width functions only if a control
+ # code is found
+ #
+ # convert the headers and rows into a single, tab-delimited string ensuring that any bytestrings
+ # are decoded safely (i.e. errors ignored)
+ plainText = "\t".join(
+ chain(
+ # headers
+ map(_toStr, headers),
+ # rows: chain the rows together into a single iterable after mapping the bytestring
+ # conversion to each cell value
+ chain.from_iterable(map(_toStr, row) for row in listOfLists),
+ )
+ )
+
+ hasInvisible = _ansiCodes.search(plainText) is not None
+
+ if (
+ not isinstance(tableFmt, TableFormat)
+ and tableFmt in multilineFormats
+ and _isMultiline(plainText)
+ ):
+ tableFmt = multilineFormats.get(tableFmt, tableFmt)
+ isMultiline = True
+ else:
+ isMultiline = False
+ widthFn = _chooseWidthFn(hasInvisible, isMultiline)
+
+ # format rows and columns, convert numeric values to strings
+ cols = list(zip_longest(*listOfLists))
+ numparses = _expandNumparse(disableNumParse, len(cols))
+ coltypes = [_columnType(col, numparse=np) for col, np in zip(cols, numparses)]
+ if isinstance(floatFmt, str):
+ # old version: just duplicate the string to use in each column
+ floatFormats = len(cols) * [floatFmt]
+ else: # if floatFmt is list, tuple etc we have one per column
+ floatFormats = list(floatFmt)
+ if len(floatFormats) < len(cols):
+ floatFormats.extend((len(cols) - len(floatFormats)) * [_DEFAULT_FLOAT_FMT])
+ if isinstance(intFmt, str):
+ # old version: just duplicate the string to use in each column
+ intFormats = len(cols) * [intFmt]
+ else: # if intFmt is list, tuple etc we have one per column
+ intFormats = list(intFmt)
+ if len(intFormats) < len(cols):
+ intFormats.extend((len(cols) - len(intFormats)) * [_DEFAULT_INT_FMT])
+ if isinstance(missingVal, str):
+ missingVals = len(cols) * [missingVal]
+ else:
+ missingVals = list(missingVal)
+ if len(missingVals) < len(cols):
+ missingVals.extend((len(cols) - len(missingVals)) * [_DEFAULT_MISSING_VAL])
+ cols = [
+ [_format(v, ct, flFmt, intFmt, missV, hasInvisible) for v in c]
+ for c, ct, flFmt, intFmt, missV in zip(
+ cols, coltypes, floatFormats, intFormats, missingVals
+ )
+ ]
+
+ # align columns
+ # first set global alignment
+ if colGlobalAlign is not None: # if global alignment provided
+ aligns = [colGlobalAlign] * len(cols)
+ else: # default
+ aligns = [numAlign if ct in [int, float] else strAlign for ct in coltypes]
+
+ # then specific alignments
+ if colAlign is not None:
+ assert isinstance(colAlign, Iterable)
+ if isinstance(colAlign, str):
+ runLog.warning(
+ f"As a string, `colAlign` is interpreted as {[c for c in colAlign]}. Did you "
+ + f'mean `colGlobalAlign = "{colAlign}"` or `colAlign = ("{colAlign}",)`?'
+ )
+ for idx, align in enumerate(colAlign):
+ if not idx < len(aligns):
+ break
+ elif align != "global":
+ aligns[idx] = align
+ minwidths = (
+ [widthFn(h) + minPadding for h in headers] if headers else [0] * len(cols)
+ )
+ cols = [
+ _alignColumn(c, a, minw, hasInvisible, isMultiline)
+ for c, a, minw in zip(cols, aligns, minwidths)
+ ]
+
+ alignsHeaders = None
+ if headers:
+ # align headers and add headers
+ tCols = cols or [[""]] * len(headers)
+ # first set global alignment
+ if headersGlobalAlign is not None: # if global alignment provided
+ alignsHeaders = [headersGlobalAlign] * len(tCols)
+ else: # default
+ alignsHeaders = aligns or [strAlign] * len(headers)
+ # then specific header alignments
+ if headersAlign is not None:
+ assert isinstance(headersAlign, Iterable)
+ if isinstance(headersAlign, str):
+ runLog.warning(
+ f"As a string, `headersAlign` is interpreted as {[c for c in headersAlign]}. "
+ + f'Did you mean `headersGlobalAlign = "{headersAlign}"` or `headersAlign = '
+ + f'("{headersAlign}",)`?'
+ )
+ for idx, align in enumerate(headersAlign):
+ hidx = headersPad + idx
+ if not hidx < len(alignsHeaders):
+ break
+ elif align == "same" and hidx < len(aligns): # same as column align
+ alignsHeaders[hidx] = aligns[hidx]
+ elif align != "global":
+ alignsHeaders[hidx] = align
+ minwidths = [
+ max(minw, max(widthFn(cl) for cl in c)) for minw, c in zip(minwidths, tCols)
+ ]
+ headers = [
+ _alignHeader(h, a, minw, widthFn(h), isMultiline, widthFn)
+ for h, a, minw in zip(headers, alignsHeaders, minwidths)
+ ]
+ rows = list(zip(*cols))
+ else:
+ minwidths = [max(widthFn(cl) for cl in c) for c in cols]
+ rows = list(zip(*cols))
+
+ if not isinstance(tableFmt, TableFormat):
+ tableFmt = _tableFormats.get(tableFmt, _tableFormats["simple"])
+
+ raDefault = rowAlign if isinstance(rowAlign, str) else None
+ rowAligns = _expandIterable(rowAlign, len(rows), raDefault)
+ _reinsertSeparatingLines(rows, separatingLines)
+
+ return _formatTable(
+ tableFmt,
+ headers,
+ alignsHeaders,
+ rows,
+ minwidths,
+ aligns,
+ isMultiline,
+ rowAligns=rowAligns,
+ )
+
+
+def _expandNumparse(disableNumParse, columnCount):
+ """
+ Return a list of bools of length `columnCount` which indicates whether number parsing should be
+ used on each column.
+
+ If `disableNumParse` is a list of indices, each of those indices is False and everything else
+ is True. If `disableNumParse` is a bool, every entry is `not disableNumParse`.
+ """
+ if isinstance(disableNumParse, Iterable):
+ numparses = [True] * columnCount
+ for index in disableNumParse:
+ numparses[index] = False
+ return numparses
+ else:
+ return [not disableNumParse] * columnCount
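+# e.g. (illustrative): _expandNumparse([1], 3) == [True, False, True], while
+# _expandNumparse(True, 2) == [False, False]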
+
+
+def _expandIterable(original, numDesired, default):
+ """
+ Expands the `original` argument to return a list of length `numDesired`. If `original`
+ is shorter than `numDesired`, it will be padded with the value in `default`.
+
+ If `original` is not a list to begin with (i.e., a scalar value), a list of length `numDesired`
+ completely populated with `default` will be returned.
+ """
+ if isinstance(original, Iterable) and not isinstance(original, str):
+ return original + [default] * (numDesired - len(original))
+ else:
+ return [default] * numDesired
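+# e.g. (illustrative): _expandIterable([1, 2], 4, 0) == [1, 2, 0, 0], while
+# _expandIterable(5, 3, None) == [None, None, None]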
+
+
+def _padRow(cells, padding):
+ if cells:
+ pad = " " * padding
+ paddedCells = [pad + cell + pad for cell in cells]
+ return paddedCells
+ else:
+ return cells
+
+
+def _buildSimpleRow(paddedCells, rowfmt):
+ """Format row according to DataRow format without padding."""
+ begin, sep, end = rowfmt
+ return (begin + sep.join(paddedCells) + end).rstrip()
+
+
+def _buildRow(paddedCells, colwidths, colAligns, rowfmt):
+ """Return a string which represents a row of data cells."""
+ if not rowfmt:
+ return None
+ if hasattr(rowfmt, "__call__"):
+ return rowfmt(paddedCells, colwidths, colAligns)
+ else:
+ return _buildSimpleRow(paddedCells, rowfmt)
+
+
+def _appendBasicRow(lines, paddedCells, colwidths, colAligns, rowfmt, rowAlign=None):
+ # NOTE: rowAlign is ignored and exists for api compatibility with _appendMultilineRow
+ lines.append(_buildRow(paddedCells, colwidths, colAligns, rowfmt))
+ return lines
+
+
+def _alignCellVeritically(textLines, numLines, columnWidth, rowAlignment):
+ deltaLines = numLines - len(textLines)
+ blank = [" " * columnWidth]
+ if rowAlignment == "bottom":
+ return blank * deltaLines + textLines
+ elif rowAlignment == "center":
+ topDelta = deltaLines // 2
+ bottomDelta = deltaLines - topDelta
+ return topDelta * blank + textLines + bottomDelta * blank
+ else:
+ return textLines + blank * deltaLines
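+# e.g. (illustrative): _alignCellVeritically(["x"], 3, 1, "center") == [" ", "x", " "]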
+
+
+def _appendMultilineRow(
+ lines, paddedMultilineCells, paddedWidths, colAligns, rowfmt, pad, rowAlign=None
+):
+ colwidths = [w - 2 * pad for w in paddedWidths]
+ cellsLines = [c.splitlines() for c in paddedMultilineCells]
+ nlines = max(map(len, cellsLines)) # number of lines in the row
+
+ cellsLines = [
+ _alignCellVeritically(cl, nlines, w, rowAlign)
+ for cl, w in zip(cellsLines, colwidths)
+ ]
+ linesCells = [[cl[i] for cl in cellsLines] for i in range(nlines)]
+ for ln in linesCells:
+ paddedLn = _padRow(ln, pad)
+ _appendBasicRow(lines, paddedLn, colwidths, colAligns, rowfmt)
+
+ return lines
+
+
+def _buildLine(colwidths, colAligns, linefmt):
+ """Return a string which represents a horizontal line."""
+ if not linefmt:
+ return None
+ if hasattr(linefmt, "__call__"):
+ return linefmt(colwidths, colAligns)
+ else:
+ begin, fill, sep, end = linefmt
+ cells = [fill * w for w in colwidths]
+ return _buildSimpleRow(cells, (begin, sep, end))
+
+
+def _appendLine(lines, colwidths, colAligns, linefmt):
+ lines.append(_buildLine(colwidths, colAligns, linefmt))
+ return lines
+
+
+def _formatTable(
+ fmt, headers, headersAligns, rows, colwidths, colAligns, isMultiline, rowAligns
+):
+ """Produce a plain-text representation of the table."""
+ lines = []
+ hidden = fmt.withHeaderHide if (headers and fmt.withHeaderHide) else []
+ pad = fmt.padding
+ headerrow = fmt.headerrow
+
+ paddedWidths = [(w + 2 * pad) for w in colwidths]
+ if isMultiline:
+ padRow = lambda row, _: row
+ appendRow = partial(_appendMultilineRow, pad=pad)
+ else:
+ padRow = _padRow
+ appendRow = _appendBasicRow
+
+ paddedHeaders = padRow(headers, pad)
+ paddedRows = [padRow(row, pad) for row in rows]
+
+ if fmt.lineabove and "lineabove" not in hidden:
+ _appendLine(lines, paddedWidths, colAligns, fmt.lineabove)
+
+ if paddedHeaders:
+ appendRow(lines, paddedHeaders, paddedWidths, headersAligns, headerrow)
+ if fmt.linebelowheader and "linebelowheader" not in hidden:
+ _appendLine(lines, paddedWidths, colAligns, fmt.linebelowheader)
+
+ if paddedRows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
+ # initial rows with a line below
+ for row, ralign in zip(paddedRows[:-1], rowAligns):
+ appendRow(lines, row, paddedWidths, colAligns, fmt.datarow, rowAlign=ralign)
+ _appendLine(lines, paddedWidths, colAligns, fmt.linebetweenrows)
+ # the last row without a line below
+ appendRow(
+ lines,
+ paddedRows[-1],
+ paddedWidths,
+ colAligns,
+ fmt.datarow,
+ rowAlign=rowAligns[-1],
+ )
+ else:
+ separatingLine = (
+ fmt.linebetweenrows
+ or fmt.linebelowheader
+ or fmt.linebelow
+ or fmt.lineabove
+ or Line("", "", "", "")
+ )
+ for row in paddedRows:
+ # test to see if either the 1st column or the 2nd column has the SEPARATING_LINE flag
+ if _isSeparatingLine(row):
+ _appendLine(lines, paddedWidths, colAligns, separatingLine)
+ else:
+ appendRow(lines, row, paddedWidths, colAligns, fmt.datarow)
+
+ if fmt.linebelow and "linebelow" not in hidden:
+ _appendLine(lines, paddedWidths, colAligns, fmt.linebelow)
+
+ if headers or rows:
+ return "\n".join(lines)
+ else:
+ return ""
diff --git a/armi/utils/tests/test_directoryChangersMpi.py b/armi/utils/tests/test_directoryChangersMpi.py
new file mode 100644
index 000000000..45377bf0a
--- /dev/null
+++ b/armi/utils/tests/test_directoryChangersMpi.py
@@ -0,0 +1,96 @@
+# Copyright 2024 TerraPower, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Test the MpiDirectoryChanger.
+
+These tests will generally be ignored by pytest if you are trying to run
+them in an environment without MPI installed.
+
+To run these tests from the command line, install MPI and mpi4py, and do:
+
+mpiexec -n 2 python -m pytest armi/utils/tests/test_directoryChangersMpi.py
+or
+mpiexec.exe -n 2 python -m pytest armi/utils/tests/test_directoryChangersMpi.py
+"""
+
+import os
+import shutil
+import unittest
+
+from armi import context, mpiActions
+from armi.utils.directoryChangersMpi import MpiDirectoryChanger
+
+# determine if this is a parallel run, and MPI is installed
+MPI_EXE = None
+if shutil.which("mpiexec.exe") is not None:
+ MPI_EXE = "mpiexec.exe"
+elif shutil.which("mpiexec") is not None:
+ MPI_EXE = "mpiexec"
+
+
+class RevealYourDirectory(mpiActions.MpiAction):
+ def invokeHook(self):
+ # make a dir with name corresponding to the rank, that way we can confirm
+ # that all ranks actually executed this code
+ os.mkdir(str(context.MPI_RANK))
+ return True
+
+
+class TestMPI(unittest.TestCase):
+ def setUp(self):
+ self.targetDir = "mpiDir"
+ if context.MPI_RANK == 0:
+ os.mkdir(self.targetDir)
+
+ def tearDown(self):
+ context.MPI_COMM.barrier()
+ if context.MPI_RANK == 0:
+ shutil.rmtree(self.targetDir)
+
+ @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only")
+ def test_MpiDirectoryChanger(self):
+ # make sure all workers start outside the targetDir
+ self.assertNotIn(self.targetDir, os.getcwd())
+
+ # put the workers in a loop, waiting for command from the main process
+ if context.MPI_RANK != 0:
+ while True:
+ cmd = context.MPI_COMM.bcast(None, root=0)
+ print(cmd)
+ if cmd == "quit":
+ break
+ cmd.invoke(None, None, None)
+
+ # from main, send commands to the workers to move into the targetDir
+ # and then create folders within there
+ if context.MPI_RANK == 0:
+ with MpiDirectoryChanger(self.targetDir):
+ RevealYourDirectory.invokeAsMaster(None, None, None)
+
+ # make the workers exit the waiting loop
+ context.MPI_COMM.bcast("quit", root=0)
+
+ context.MPI_COMM.barrier()
+ if context.MPI_RANK == 0:
+ # from main, confirm that subdirectories were created by all workers
+ for i in range(context.MPI_SIZE):
+ self.assertTrue(
+ os.path.isdir(os.path.join(os.getcwd(), self.targetDir, str(i)))
+ )
+
+ # make sure all workers have moved back out from the targetDir
+ self.assertNotIn(self.targetDir, os.getcwd())
+
+ context.MPI_COMM.barrier()
diff --git a/armi/utils/tests/test_hexagon.py b/armi/utils/tests/test_hexagon.py
index ea0873f87..c3ea046e4 100644
--- a/armi/utils/tests/test_hexagon.py
+++ b/armi/utils/tests/test_hexagon.py
@@ -13,12 +13,16 @@
# limitations under the License.
"""Test hexagon tools."""
import math
+import random
import unittest
from armi.utils import hexagon
class TestHexagon(unittest.TestCase):
+ N_FUZZY_DRAWS: int = 10
+ """Number of random draws to use in some fuzzy testing"""
+
def test_hexagon_area(self):
"""
Area of a hexagon.
@@ -43,3 +47,90 @@ def test_numPositionsInRing(self):
self.assertEqual(hexagon.numPositionsInRing(2), 6)
self.assertEqual(hexagon.numPositionsInRing(3), 12)
self.assertEqual(hexagon.numPositionsInRing(4), 18)
+
+ def test_rotatedCellCenter(self):
+ """Test that location of the center cell is invariant through rotation."""
+ for rot in range(6):
+ self.assertEqual(hexagon.getIndexOfRotatedCell(1, rot), 1)
+
+ def test_rotatedFirstRing(self):
+ """Simple test for the corners of the first ring are maintained during rotation."""
+ # A 60 degree rotation is just incrementing the cell index by one here
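+        # e.g., one rotation maps cell 2 -> 3, and cell 7 wraps around to 2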
+ locations = list(range(2, 8))
+ for locIndex, initialPosition in enumerate(locations):
+ for rot in range(6):
+ actual = hexagon.getIndexOfRotatedCell(initialPosition, rot)
+ newIndex = (locIndex + rot) % 6
+ expectedPosition = locations[newIndex]
+ self.assertEqual(
+ actual, expectedPosition, msg=f"{initialPosition=}, {rot=}"
+ )
+
+ def test_rotateFuzzy(self):
+ """Select some position number and rotation and check for consistency."""
+ N_DRAWS = 100
+ for _ in range(N_DRAWS):
+ self._rotateFuzzyInner()
+
+ def _rotateFuzzyInner(self):
+ rot = random.randint(1, 5)
+ initialCell = random.randint(2, 300)
+ testInfoMsg = f"{rot=}, {initialCell=}"
+ newCell = hexagon.getIndexOfRotatedCell(initialCell, rot)
+ self.assertNotEqual(newCell, initialCell, msg=testInfoMsg)
+ # should be in the same ring
+ initialRing = hexagon.numRingsToHoldNumCells(initialCell)
+ newRing = hexagon.numRingsToHoldNumCells(newCell)
+ self.assertEqual(newRing, initialRing, msg=testInfoMsg)
+ # If we un-rotate, we should get our initial cell
+ reverseRot = (6 - rot) % 6
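+        # rotations compose modulo 6, so rotating by (6 - rot) % 6 undoes rot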
+ reverseCell = hexagon.getIndexOfRotatedCell(newCell, reverseRot)
+ self.assertEqual(reverseCell, initialCell, msg=testInfoMsg)
+
+ def test_positionsUpToRing(self):
+ """Test totalPositionsUpToRing is consistent with numPositionsInRing."""
+ self.assertEqual(hexagon.totalPositionsUpToRing(1), 1)
+ self.assertEqual(hexagon.totalPositionsUpToRing(2), 7)
+ self.assertEqual(hexagon.totalPositionsUpToRing(3), 19)
+
+ totalPositions = 19
+ for ring in range(4, 30):
+ posInThisRing = hexagon.numPositionsInRing(ring)
+ totalPositions += posInThisRing
+ self.assertEqual(
+ hexagon.totalPositionsUpToRing(ring), totalPositions, msg=f"{ring=}"
+ )
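+
+        # cross-check against the closed form for centered hexagonal numbers:
+        # totalPositionsUpToRing(ring) == 3 * ring * (ring - 1) + 1
+        for ring in range(1, 30):
+            self.assertEqual(
+                hexagon.totalPositionsUpToRing(ring), 3 * ring * (ring - 1) + 1
+            )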
+
+ def test_rotatedCellIndexErrors(self):
+ """Test errors for non-positive initial cell indices during rotation."""
+ self._testNonPosRotIndex(0)
+ for _ in range(self.N_FUZZY_DRAWS):
+ index = random.randint(-100, -1)
+ self._testNonPosRotIndex(index)
+
+ def _testNonPosRotIndex(self, index: int):
+ with self.assertRaisesRegex(ValueError, ".*must be positive", msg=f"{index=}"):
+ hexagon.getIndexOfRotatedCell(index, 0)
+
+ def test_rotatedCellOrientationErrors(self):
+ """Test errors for invalid orientation numbers during rotation."""
+ for _ in range(self.N_FUZZY_DRAWS):
+ upper = random.randint(6, 100)
+ self._testRotOrientation(upper)
+ lower = random.randint(-100, -1)
+ self._testRotOrientation(lower)
+
+ def _testRotOrientation(self, orientation: int):
+ with self.assertRaisesRegex(
+ ValueError, "Orientation number", msg=f"{orientation=}"
+ ):
+ hexagon.getIndexOfRotatedCell(
+ initialCellIndex=1, orientationNumber=orientation
+ )
+
+ def test_indexWithNoRotation(self):
+ """Test that the initial cell location is returned if not rotated."""
+ for _ in range(self.N_FUZZY_DRAWS):
+ ix = random.randint(1, 300)
+ postRotation = hexagon.getIndexOfRotatedCell(ix, orientationNumber=0)
+ self.assertEqual(postRotation, ix)
diff --git a/armi/utils/tests/test_iterables.py b/armi/utils/tests/test_iterables.py
index e681cd0ed..f1ebcb8e0 100644
--- a/armi/utils/tests/test_iterables.py
+++ b/armi/utils/tests/test_iterables.py
@@ -16,6 +16,8 @@
import time
import unittest
+import numpy as np
+
from armi.utils import iterables
# CONSTANTS
@@ -174,3 +176,21 @@ def test_addingSequences(self):
self.assertEqual(vals[0], 0)
self.assertEqual(vals[-1], 5)
self.assertEqual(len(vals), 6)
+
+ def test_listPivot(self):
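+        # pivot performs a left rotation; e.g., pivot([0, 1, 2, 3], 1) == [1, 2, 3, 0]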
+ data = list(range(10))
+ loc = 4
+ actual = iterables.pivot(data, loc)
+ self.assertEqual(actual, data[loc:] + data[:loc])
+
+ def test_arrayPivot(self):
+ data = np.arange(10)
+ loc = -7
+ actual = iterables.pivot(data, loc)
+ expected = np.array(iterables.pivot(data.tolist(), loc))
+ self.assertTrue((actual == expected).all(), msg=f"{actual=} != {expected=}")
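+        # same semantics cross-checked another way: pivot matches np.roll
+        # with a negated shift
+        self.assertTrue((actual == np.roll(data, -loc)).all())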
+ # Catch a silent failure case where pivot doesn't change the iterable
+ self.assertTrue(
+ (actual != data).all(),
+ msg=f"Pre-pivot {data=} should not equal post-pivot {actual=}",
+ )
diff --git a/armi/utils/tests/test_mathematics.py b/armi/utils/tests/test_mathematics.py
index 8b580f907..b7554b07a 100644
--- a/armi/utils/tests/test_mathematics.py
+++ b/armi/utils/tests/test_mathematics.py
@@ -113,10 +113,12 @@ def test_fixThreeDigitExp(self):
self.assertEqual(-2.4594981981654e-101, fixed)
def test_getFloat(self):
- self.assertEqual(getFloat(1.0), 1.0)
- self.assertEqual(getFloat("1.0"), 1.0)
self.assertIsNone(getFloat("word"))
+ for flt in [-9.123 + f * 0.734 for f in range(25)]:
+ self.assertEqual(getFloat(flt), flt)
+ self.assertEqual(getFloat(str(flt)), flt)
+
def test_getStepsFromValues(self):
steps = getStepsFromValues([1.0, 3.0, 6.0, 10.0], prevValue=0.0)
self.assertListEqual(steps, [1.0, 2.0, 3.0, 4.0])
diff --git a/armi/utils/tests/test_plotting.py b/armi/utils/tests/test_plotting.py
index ec7ab50f8..0fdb9e1cc 100644
--- a/armi/utils/tests/test_plotting.py
+++ b/armi/utils/tests/test_plotting.py
@@ -16,6 +16,8 @@
import os
import unittest
+import numpy as np
+
from armi.nuclearDataIO.cccc import isotxs
from armi.reactor.flags import Flags
from armi.reactor.tests import test_reactors
@@ -37,7 +39,9 @@ class TestPlotting(unittest.TestCase):
@classmethod
def setUpClass(cls):
- cls.o, cls.r = test_reactors.loadTestReactor()
+ cls.o, cls.r = test_reactors.loadTestReactor(
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
def test_plotDepthMap(self): # indirectly tests plot face map
with TemporaryDirectoryChanger():
@@ -125,3 +129,57 @@ def test_plotCartesianBlock(self):
def _checkExists(self, fName):
self.assertTrue(os.path.exists(fName))
+
+
+class TestPatches(unittest.TestCase):
+ """Test the ability to correctly make patches."""
+
+ def test_makeAssemPatches(self):
+ # this one is flats-up with many assemblies in the core
+ _, rHexFlatsUp = test_reactors.loadTestReactor()
+
+ nAssems = len(rHexFlatsUp.core)
+ self.assertGreater(nAssems, 1)
+ patches = plotting._makeAssemPatches(rHexFlatsUp.core)
+ self.assertEqual(len(patches), nAssems)
+
+ # find the patch corresponding to the center assembly
+ for patch in patches:
+ if np.allclose(patch.xy, (0, 0)):
+ break
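+        else:
+            # defensive guard: fail loudly if no patch sits at the origin,
+            # rather than silently testing whichever patch came last
+            self.fail("no patch found at the core center (0, 0)")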
+
+ vertices = patch.get_verts()
+ # there should be 1 more than the number of points in the shape
+ self.assertEqual(len(vertices), 7)
+ # for flats-up, the first vertex should have a y position of ~zero
+ self.assertAlmostEqual(vertices[0][1], 0)
+
+ # this one is corners-up, with only a single assembly
+ _, rHexCornersUp = test_reactors.loadTestReactor(
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
+
+ nAssems = len(rHexCornersUp.core)
+ self.assertEqual(nAssems, 1)
+ patches = plotting._makeAssemPatches(rHexCornersUp.core)
+ self.assertEqual(len(patches), 1)
+
+ vertices = patches[0].get_verts()
+ self.assertEqual(len(vertices), 7)
+ # for corners-up, the first vertex should have an x position of ~zero
+ self.assertAlmostEqual(vertices[0][0], 0)
+
+        # this one is cartesian, with many assemblies in the core
+ _, rCartesian = test_reactors.loadTestReactor(
+ inputFileName="refTestCartesian.yaml"
+ )
+
+ nAssems = len(rCartesian.core)
+ self.assertGreater(nAssems, 1)
+ patches = plotting._makeAssemPatches(rCartesian.core)
+ self.assertEqual(nAssems, len(patches))
+
+        # pick any one patch and ensure that it is square-like; orientation
+        # is not important here
+ vertices = patches[0].get_verts()
+ self.assertEqual(len(vertices), 5)
diff --git a/armi/utils/tests/test_reportPlotting.py b/armi/utils/tests/test_reportPlotting.py
index 5057857ff..ae2cd6f23 100644
--- a/armi/utils/tests/test_reportPlotting.py
+++ b/armi/utils/tests/test_reportPlotting.py
@@ -35,7 +35,9 @@
class TestRadar(unittest.TestCase):
def setUp(self):
- self.o, self.r = test_reactors.loadTestReactor(TEST_ROOT)
+ self.o, self.r = test_reactors.loadTestReactor(
+ TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
self.td = TemporaryDirectoryChanger()
self.td.__enter__()
@@ -81,20 +83,20 @@ def test_keffVsTime(self):
# plot with no keff function
keffVsTime(self.r.name, t, t, keffUnc=[], extension=ext)
- self.assertTrue(os.path.exists("R-armiRun.keff.png"))
- self.assertGreater(os.path.getsize("R-armiRun.keff.png"), 0)
+ self.assertTrue(os.path.exists("R-armiRunSmallest.keff.png"))
+ self.assertGreater(os.path.getsize("R-armiRunSmallest.keff.png"), 0)
# plot with a keff function
keffVsTime(self.r.name, t, t, t, extension=ext)
- self.assertTrue(os.path.exists("R-armiRun.keff.png"))
- self.assertGreater(os.path.getsize("R-armiRun.keff.png"), 0)
+ self.assertTrue(os.path.exists("R-armiRunSmallest.keff.png"))
+ self.assertGreater(os.path.getsize("R-armiRunSmallest.keff.png"), 0)
def test_valueVsTime(self):
t = list(range(12))
ext = "png"
valueVsTime(self.r.name, t, t, "val", "yaxis", "title", extension=ext)
- self.assertTrue(os.path.exists("R-armiRun.val.png"))
- self.assertGreater(os.path.getsize("R-armiRun.val.png"), 0)
+ self.assertTrue(os.path.exists("R-armiRunSmallest.val.png"))
+ self.assertGreater(os.path.getsize("R-armiRunSmallest.val.png"), 0)
def test_buVsTime(self):
name = "buvstime"
diff --git a/armi/utils/tests/test_tabulate.py b/armi/utils/tests/test_tabulate.py
new file mode 100644
index 000000000..1876cd250
--- /dev/null
+++ b/armi/utils/tests/test_tabulate.py
@@ -0,0 +1,1726 @@
+# Copyright 2024 TerraPower, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for tabulate.
+
+This file started out as the MIT-licensed "tabulate", though we have made, and
+will continue to make, many changes as we need them. Thanks to the tabulate team.
+
+https://github.com/astanin/python-tabulate
+"""
+from collections import defaultdict
+from collections import namedtuple
+from collections import OrderedDict
+from collections import UserDict
+from dataclasses import dataclass
+from datetime import datetime
+import unittest
+
+import numpy as np
+
+from armi.utils.tabulate import _alignCellVeritically
+from armi.utils.tabulate import _alignColumn
+from armi.utils.tabulate import _bool
+from armi.utils.tabulate import _buildLine
+from armi.utils.tabulate import _buildRow
+from armi.utils.tabulate import _format
+from armi.utils.tabulate import _isMultiline
+from armi.utils.tabulate import _multilineWidth
+from armi.utils.tabulate import _normalizeTabularData
+from armi.utils.tabulate import _tableFormats
+from armi.utils.tabulate import _type
+from armi.utils.tabulate import _visibleWidth
+from armi.utils.tabulate import _wrapTextToColWidths
+from armi.utils.tabulate import SEPARATING_LINE
+from armi.utils.tabulate import tabulate
+from armi.utils.tabulate import tabulateFormats
+
+
+class TestTabulateAPI(unittest.TestCase):
+ def test_tabulateFormats(self):
+ """API: tabulateFormats is a list of strings."""
+ supported = tabulateFormats
+ self.assertEqual(type(supported), list)
+ for fmt in supported:
+ self.assertEqual(type(fmt), str)
+
+
+class TestTabulateInputs(unittest.TestCase):
+ def test_iterableOfEmpties(self):
+ """Input: test various empty inputs."""
+ ii = iter(map(lambda x: iter(x), []))
+ result = tabulate(ii, "firstrow")
+ self.assertEqual("", result)
+
+ ij = iter(map(lambda x: iter(x), ["abcde"]))
+ expected = "\n".join(
+ [
+ "a b c d e",
+ "--- --- --- --- ---",
+ ]
+ )
+ result = tabulate(ij, "firstrow")
+ self.assertEqual(expected, result)
+
+ ik = iter([])
+ expected = "\n".join(
+ [
+ "a b c",
+ "--- --- ---",
+ ]
+ )
+ result = tabulate(ik, "abc")
+ self.assertEqual(expected, result)
+
+ def test_iterableOfIterables(self):
+ """Input: an interable of iterables."""
+ ii = iter(map(lambda x: iter(x), [range(5), range(5, 0, -1)]))
+ expected = "\n".join(
+ ["- - - - -", "0 1 2 3 4", "5 4 3 2 1", "- - - - -"]
+ )
+ result = tabulate(ii, headersAlign="center")
+ self.assertEqual(expected, result)
+
+ def test_iterableOfIterablesHeaders(self):
+ """Input: an interable of iterables with headers."""
+ ii = iter(map(lambda x: iter(x), [range(5), range(5, 0, -1)]))
+ expected = "\n".join(
+ [
+ " a b c d e",
+ "--- --- --- --- ---",
+ " 0 1 2 3 4",
+ " 5 4 3 2 1",
+ ]
+ )
+ result = tabulate(ii, "abcde")
+ self.assertEqual(expected, result)
+
+ def test_iterableOfIterablesFirstrow(self):
+ """Input: an interable of iterables with the first row as headers."""
+ ii = iter(map(lambda x: iter(x), ["abcde", range(5), range(5, 0, -1)]))
+ expected = "\n".join(
+ [
+ " a b c d e",
+ "--- --- --- --- ---",
+ " 0 1 2 3 4",
+ " 5 4 3 2 1",
+ ]
+ )
+ result = tabulate(ii, "firstrow")
+ self.assertEqual(expected, result)
+
+ def test_listOfLists(self):
+ """Input: a list of lists with headers."""
+ ll = [["a", "one", 1], ["b", "two", None]]
+ expected = "\n".join(
+ [
+ " string number",
+ "-- -------- --------",
+ "a one 1",
+ "b two",
+ ]
+ )
+ result = tabulate(ll, headers=["string", "number"])
+ self.assertEqual(expected, result)
+
+ def test_listOfListsFirstrow(self):
+ """Input: a list of lists with the first row as headers."""
+ ll = [["string", "number"], ["a", "one", 1], ["b", "two", None]]
+ expected = "\n".join(
+ [
+ " string number",
+ "-- -------- --------",
+ "a one 1",
+ "b two",
+ ]
+ )
+ result = tabulate(ll, headers="firstrow")
+ self.assertEqual(expected, result)
+
+ def test_listOfListsKeys(self):
+ """Input: a list of lists with column indices as headers."""
+ ll = [["a", "one", 1], ["b", "two", None]]
+ expected = "\n".join(
+ ["0 1 2", "--- --- ---", "a one 1", "b two"]
+ )
+ result = tabulate(ll, headers="keys")
+ self.assertEqual(expected, result)
+
+ def test_dictLike(self):
+ """Input: a dict of iterables with keys as headers."""
+ # columns should be padded with None, keys should be used as headers
+ dd = {"a": range(3), "b": range(101, 105)}
+        # dicts preserve insertion order in Python 3.7+, so the columns follow
+        # the order in which the keys were inserted
+ expected1 = "\n".join(
+ [" a b", "--- ---", " 0 101", " 1 102", " 2 103", " 104"]
+ )
+ result = tabulate(dd, "keys")
+ self.assertEqual(result, expected1)
+
+ def test_numpy2d(self):
+ """Input: a 2D NumPy array with headers."""
+ na = (np.arange(1, 10, dtype=np.float32).reshape((3, 3)) ** 3) * 0.5
+ expected = "\n".join(
+ [
+ " a b c",
+ "----- ----- -----",
+ " 0.5 4 13.5",
+ " 32 62.5 108",
+ "171.5 256 364.5",
+ ]
+ )
+ result = tabulate(na, ["a", "b", "c"])
+ self.assertEqual(expected, result)
+
+ def test_numpy2dFirstrow(self):
+ """Input: a 2D NumPy array with the first row as headers."""
+ na = np.arange(1, 10, dtype=np.int32).reshape((3, 3)) ** 3
+ expected = "\n".join(
+ [" 1 8 27", "--- --- ----", " 64 125 216", "343 512 729"]
+ )
+ result = tabulate(na, headers="firstrow")
+ self.assertEqual(expected, result)
+
+ def test_numpy2dKeys(self):
+ """Input: a 2D NumPy array with column indices as headers."""
+ na = (np.arange(1, 10, dtype=np.float32).reshape((3, 3)) ** 3) * 0.5
+ expected = "\n".join(
+ [
+ " 0 1 2",
+ "----- ----- -----",
+ " 0.5 4 13.5",
+ " 32 62.5 108",
+ "171.5 256 364.5",
+ ]
+ )
+ result = tabulate(na, headers="keys")
+ self.assertEqual(expected, result)
+
+ def test_numpyRecordArray(self):
+ """Input: a 2D NumPy record array without header."""
+ na = np.asarray(
+ [("Alice", 23, 169.5), ("Bob", 27, 175.0)],
+ dtype={
+ "names": ["name", "age", "height"],
+ "formats": ["a32", "uint8", "float32"],
+ },
+ )
+ expected = "\n".join(
+ [
+ "----- -- -----",
+ "Alice 23 169.5",
+ "Bob 27 175",
+ "----- -- -----",
+ ]
+ )
+ result = tabulate(na)
+ self.assertEqual(expected, result)
+
+ def test_numpyRecordArrayKeys(self):
+ """Input: a 2D NumPy record array with column names as headers."""
+ na = np.asarray(
+ [("Alice", 23, 169.5), ("Bob", 27, 175.0)],
+ dtype={
+ "names": ["name", "age", "height"],
+ "formats": ["a32", "uint8", "float32"],
+ },
+ )
+ expected = "\n".join(
+ [
+ "name age height",
+ "------ ----- --------",
+ "Alice 23 169.5",
+ "Bob 27 175",
+ ]
+ )
+ result = tabulate(na, headers="keys")
+ self.assertEqual(expected, result)
+
+ def test_numpyRecordArrayHeaders(self):
+ """Input: a 2D NumPy record array with user-supplied headers."""
+ na = np.asarray(
+ [("Alice", 23, 169.5), ("Bob", 27, 175.0)],
+ dtype={
+ "names": ["name", "age", "height"],
+ "formats": ["a32", "uint8", "float32"],
+ },
+ )
+ expected = "\n".join(
+ [
+ "person years cm",
+ "-------- ------- -----",
+ "Alice 23 169.5",
+ "Bob 27 175",
+ ]
+ )
+ result = tabulate(na, headers=["person", "years", "cm"])
+ self.assertEqual(expected, result)
+
+ def test_listOfNamedtuples(self):
+ """Input: a list of named tuples with field names as headers."""
+ NT = namedtuple("NT", ["foo", "bar"])
+ lt = [NT(1, 2), NT(3, 4)]
+ expected = "\n".join(["- -", "1 2", "3 4", "- -"])
+ result = tabulate(lt)
+ self.assertEqual(expected, result)
+
+ def test_listOfNamedtuplesKeys(self):
+ """Input: a list of named tuples with field names as headers."""
+ NT = namedtuple("NT", ["foo", "bar"])
+ lt = [NT(1, 2), NT(3, 4)]
+ expected = "\n".join(
+ [" foo bar", "----- -----", " 1 2", " 3 4"]
+ )
+ result = tabulate(lt, headers="keys")
+ self.assertEqual(expected, result)
+
+ def test_listOfDicts(self):
+ """Input: a list of dictionaries."""
+ lod = [{"foo": 1, "bar": 2}, {"foo": 3, "bar": 4}]
+ expected1 = "\n".join(["- -", "1 2", "3 4", "- -"])
+ expected2 = "\n".join(["- -", "2 1", "4 3", "- -"])
+ result = tabulate(lod)
+ self.assertIn(result, [expected1, expected2])
+
+ def test_listOfUserdicts(self):
+ """Input: a list of UserDicts."""
+ lod = [UserDict(foo=1, bar=2), UserDict(foo=3, bar=4)]
+ expected1 = "\n".join(["- -", "1 2", "3 4", "- -"])
+ expected2 = "\n".join(["- -", "2 1", "4 3", "- -"])
+ result = tabulate(lod)
+ self.assertIn(result, [expected1, expected2])
+
+ def test_listOfDictsKeys(self):
+ """Input: a list of dictionaries, with keys as headers."""
+ lod = [{"foo": 1, "bar": 2}, {"foo": 3, "bar": 4}]
+ expected1 = "\n".join(
+ [" foo bar", "----- -----", " 1 2", " 3 4"]
+ )
+ expected2 = "\n".join(
+ [" bar foo", "----- -----", " 2 1", " 4 3"]
+ )
+ result = tabulate(lod, headers="keys")
+ self.assertIn(result, [expected1, expected2])
+
+ def test_listOfUserdictsKeys(self):
+ """Input: a list of UserDicts."""
+ lod = [UserDict(foo=1, bar=2), UserDict(foo=3, bar=4)]
+ expected1 = "\n".join(
+ [" foo bar", "----- -----", " 1 2", " 3 4"]
+ )
+ expected2 = "\n".join(
+ [" bar foo", "----- -----", " 2 1", " 4 3"]
+ )
+ result = tabulate(lod, headers="keys")
+ self.assertIn(result, [expected1, expected2])
+
+ def test_listOfDictsWithMissingKeys(self):
+ """Input: a list of dictionaries, with missing keys."""
+ lod = [{"foo": 1}, {"bar": 2}, {"foo": 4, "baz": 3}]
+ expected = "\n".join(
+ [
+ " foo bar baz",
+ "----- ----- -----",
+ " 1",
+ " 2",
+ " 4 3",
+ ]
+ )
+ result = tabulate(lod, headers="keys")
+ self.assertEqual(expected, result)
+
+ def test_listOfDictsFirstrow(self):
+ """Input: a list of dictionaries, with the first dict as headers."""
+ lod = [{"foo": "FOO", "bar": "BAR"}, {"foo": 3, "bar": 4, "baz": 5}]
+ # if some key is missing in the first dict, use the key name instead
+ expected1 = "\n".join(
+ [" FOO BAR baz", "----- ----- -----", " 3 4 5"]
+ )
+ expected2 = "\n".join(
+ [" BAR FOO baz", "----- ----- -----", " 4 3 5"]
+ )
+ result = tabulate(lod, headers="firstrow")
+ self.assertIn(result, [expected1, expected2])
+
+ def test_listOfDictsWithDictOfHeaders(self):
+ """Input: a dict of user headers for a list of dicts."""
+ table = [{"letters": "ABCDE", "digits": 12345}]
+ headers = {"digits": "DIGITS", "letters": "LETTERS"}
+ expected1 = "\n".join(
+ [" DIGITS LETTERS", "-------- ---------", " 12345 ABCDE"]
+ )
+ expected2 = "\n".join(
+ ["LETTERS DIGITS", "--------- --------", "ABCDE 12345"]
+ )
+ result = tabulate(table, headers=headers)
+ self.assertIn(result, [expected1, expected2])
+
+ def test_listOfDictsWithListOfHeaders(self):
+ """Input: ValueError on a list of headers with a list of dicts."""
+ table = [{"letters": "ABCDE", "digits": 12345}]
+ headers = ["DIGITS", "LETTERS"]
+ with self.assertRaises(ValueError):
+ tabulate(table, headers=headers)
+
+ def test_listOfOrdereddicts(self):
+ """Input: a list of OrderedDicts."""
+ od = OrderedDict([("b", 1), ("a", 2)])
+ lod = [od, od]
+ expected = "\n".join([" b a", "--- ---", " 1 2", " 1 2"])
+ result = tabulate(lod, headers="keys")
+ self.assertEqual(expected, result)
+
+ def test_listBytes(self):
+ """Input: a list of bytes."""
+ lb = [["你好".encode("utf-8")], ["你好"]]
+ expected = "\n".join(
+ [
+ "bytes",
+ "---------------------------",
+ r"b'\xe4\xbd\xa0\xe5\xa5\xbd'",
+ "你好",
+ ]
+ )
+ result = tabulate(lb, headers=["bytes"])
+ self.assertEqual(expected, result)
+
+ def test_tightCouplingExample(self):
+ """Input: Real world-ish example from tight coupling."""
+ # the two examples below should both produce the same output:
+ border = "-- ------------------------------ -------------- ----------------------------"
+ expected = "\n".join(
+ [
+ border,
+ " criticalCrIteration: keffUnc dif3d: power thInterface: THavgCladTemp",
+ border,
+ " 0 9.01234e-05 0.00876543 0.00123456",
+ border,
+ ]
+ )
+
+        # the data is a regular dictionary
+ data = {
+ "criticalCrIteration: keffUnc": [9.01234e-05],
+ "dif3d: power": [0.00876543],
+ "thInterface: THavgCladTemp": [0.00123456],
+ }
+ result = tabulate(data, headers="keys", showIndex=True, tableFmt="armi")
+ self.assertEqual(expected, result)
+
+ # the data is a defaultdict
+ dataD = defaultdict(list)
+ for key, vals in data.items():
+ for val in vals:
+ dataD[key].append(val)
+
+ result2 = tabulate(dataD, headers="keys", showIndex=True, tableFmt="armi")
+ self.assertEqual(expected, result2)
+
+
+class TestTabulateInternal(unittest.TestCase):
+ def test_alignColumnDecimal(self):
+ """Internal: _align_column(..., 'decimal')."""
+ column = ["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"]
+ result = _alignColumn(column, "decimal")
+ expected = [
+ " 12.345 ",
+ "-1234.5 ",
+ " 1.23 ",
+ " 1234.5 ",
+ " 1e+234 ",
+ " 1.0e234",
+ ]
+ self.assertEqual(expected, result)
+
+ def test_alignColumnDecimalWithThousandSeparators(self):
+ """Internal: _align_column(..., 'decimal')."""
+ column = ["12.345", "-1234.5", "1.23", "1,234.5", "1e+234", "1.0e234"]
+ output = _alignColumn(column, "decimal")
+ expected = [
+ " 12.345 ",
+ "-1234.5 ",
+ " 1.23 ",
+ "1,234.5 ",
+ " 1e+234 ",
+ " 1.0e234",
+ ]
+ self.assertEqual(expected, output)
+
+ def test_alignColumnDecimalWithIncorrectThousandSeparators(self):
+ """Internal: _align_column(..., 'decimal')."""
+ column = ["12.345", "-1234.5", "1.23", "12,34.5", "1e+234", "1.0e234"]
+ output = _alignColumn(column, "decimal")
+ expected = [
+ " 12.345 ",
+ " -1234.5 ",
+ " 1.23 ",
+ "12,34.5 ",
+ " 1e+234 ",
+ " 1.0e234",
+ ]
+ self.assertEqual(expected, output)
+
+ def test_alignColumnNone(self):
+ """Internal: _align_column(..., None)."""
+ column = ["123.4", "56.7890"]
+ output = _alignColumn(column, None)
+ expected = ["123.4", "56.7890"]
+ self.assertEqual(expected, output)
+
+ def test_alignColumnMultiline(self):
+ """Internal: _align_column(..., is_multiline=True)."""
+ column = ["1", "123", "12345\n6"]
+ output = _alignColumn(column, "center", isMultiline=True)
+ expected = [" 1 ", " 123 ", "12345" + "\n" + " 6 "]
+ self.assertEqual(expected, output)
+
+ def test_alignCellVeriticallyOneLineOnly(self):
+ """Internal: Aligning a single height cell is same regardless of alignment value."""
+ lines = ["one line"]
+ column_width = 8
+
+ top = _alignCellVeritically(lines, 1, column_width, "top")
+ center = _alignCellVeritically(lines, 1, column_width, "center")
+ bottom = _alignCellVeritically(lines, 1, column_width, "bottom")
+ none = _alignCellVeritically(lines, 1, column_width, None)
+
+ expected = ["one line"]
+        self.assertTrue(top == center == bottom == none == expected)
+
+ def test_alignCellVeriticallyTopSingleTextMultiplePad(self):
+ """Internal: Align single cell text to top."""
+ result = _alignCellVeritically(["one line"], 3, 8, "top")
+ expected = ["one line", " ", " "]
+ self.assertEqual(expected, result)
+
+ def test_alignCellVeriticallyCenterSingleTextMultiplePad(self):
+ """Internal: Align single cell text to center."""
+ result = _alignCellVeritically(["one line"], 3, 8, "center")
+ expected = [" ", "one line", " "]
+ self.assertEqual(expected, result)
+
+ def test_alignCellVeriticallyBottomSingleTextMultiplePad(self):
+ """Internal: Align single cell text to bottom."""
+ result = _alignCellVeritically(["one line"], 3, 8, "bottom")
+ expected = [" ", " ", "one line"]
+ self.assertEqual(expected, result)
+
+ def test_alignCellVeriticallyTopMultiTextMultiplePad(self):
+ """Internal: Align multiline celltext text to top."""
+ text = ["just", "one ", "cell"]
+ result = _alignCellVeritically(text, 6, 4, "top")
+ expected = ["just", "one ", "cell", " ", " ", " "]
+ self.assertEqual(expected, result)
+
+ def test_alignCellVeriticallyCenterMultiTextMultiplePad(self):
+ """Internal: Align multiline celltext text to center."""
+ text = ["just", "one ", "cell"]
+ result = _alignCellVeritically(text, 6, 4, "center")
+
+        # an even number of rows can't be perfectly centered; when a choice
+        # must be made, we pad less at the top
+ expected = [" ", "just", "one ", "cell", " ", " "]
+ self.assertEqual(expected, result)
+
+ def test_alignCellVeriticallyBottomMultiTextMultiplePad(self):
+ """Internal: Align multiline celltext text to bottom."""
+ text = ["just", "one ", "cell"]
+ result = _alignCellVeritically(text, 6, 4, "bottom")
+ expected = [" ", " ", " ", "just", "one ", "cell"]
+ self.assertEqual(expected, result)
+
+ def test_assortedRareEdgeCases(self):
+ """Test some of the more rare edge cases in the purely internal functions."""
+ from armi.utils.tabulate import _alignHeader
+ from armi.utils.tabulate import _prependRowIndex
+ from armi.utils.tabulate import _removeSeparatingLines
+
+ self.assertEqual(_alignHeader("123", False, 3, 3, False, None), "123")
+
+ result = _removeSeparatingLines(123)
+ self.assertEqual(result[0], 123)
+ self.assertIsNone(result[1])
+
+ self.assertEqual(_prependRowIndex([123], None), [123])
+
+ def test_bool(self):
+ self.assertTrue(_bool("stuff"))
+ self.assertFalse(_bool(""))
+ self.assertTrue(_bool(123))
+ self.assertFalse(_bool(np.array([1, 0, -1])))
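+        # bool() raises ValueError on multi-element numpy arrays; _bool is
+        # expected to swallow that and return False (as asserted above)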
+
+ def test_buildLine(self):
+ """Basic sanity test of internal _buildLine() function."""
+ lineFormat = _tableFormats["armi"].lineabove
+ self.assertEqual(_buildLine([2, 2], ["center", "center"], lineFormat), "-- --")
+
+ formatter = lambda a, b: "xyz"
+ self.assertEqual(_buildLine([2, 2], ["center", "center"], formatter), "xyz")
+
+ self.assertIsNone(_buildLine([2, 2], ["center", "center"], None))
+
+ def test_buildRow(self):
+ """Basic sanity test of internal _buildRow() function."""
+ rowFormat = _tableFormats["armi"].datarow
+ self.assertEqual(_buildRow("", [2, 2], ["center", "center"], rowFormat), "")
+
+ formatter = lambda a, b, c: "xyz"
+ d = {"a": 1, "b": 2}
+ self.assertEqual(_buildRow(d, [2, 2], ["center", "center"], formatter), "xyz")
+
+ lst = ["ab", "cd"]
+ self.assertEqual(
+ _buildRow(lst, [2, 2], ["center", "center"], rowFormat), "ab cd"
+ )
+
+ self.assertIsNone(_buildRow("ab", [2, 2], ["center", "center"], ""))
+
+ def test_format(self):
+ """Basic sanity test of internal _format() function."""
+ self.assertEqual(_format(None, str, "8", "", "X", True), "X")
+ self.assertEqual(_format(123, str, "8", "", "X", True), "123")
+ self.assertEqual(_format("123", int, "8", "", "X", True), "123")
+ self.assertEqual(
+ _format(bytes("abc", "utf-8"), bytes, "8", "", "X", True), "abc"
+ )
+ self.assertEqual(_format("3.14", float, "4", "", "X", True), "3.14")
+ colorNum = "\x1b[31m3.14\x1b[0m"
+ self.assertEqual(_format(colorNum, float, "4", "", "X", True), colorNum)
+ self.assertEqual(_format(None, None, "8", "", "X", True), "X")
+
+ def test_isMultiline(self):
+ """Basic sanity test of internal _isMultiline() function."""
+ self.assertFalse(_isMultiline("world"))
+ self.assertTrue(_isMultiline("hello\nworld"))
+ self.assertFalse(_isMultiline(bytes("world", "utf-8")))
+ self.assertTrue(_isMultiline(bytes("hello\nworld", "utf-8")))
+
+ def test_multilineWidth(self):
+ """Internal: _multilineWidth()."""
+ multilineString = "\n".join(["foo", "barbaz", "spam"])
+ self.assertEqual(_multilineWidth(multilineString), 6)
+ onelineString = "12345"
+ self.assertEqual(_multilineWidth(onelineString), len(onelineString))
+
+ def test_normalizeTabularData(self):
+ """Basic sanity test of internal _normalizeTabularData() function."""
+ res = _normalizeTabularData([[1, 2], [3, 4]], np.array(["a", "b"]), "default")
+ self.assertEqual(res[0], [[1, 2], [3, 4]])
+ self.assertEqual(res[1], ["a", "b"])
+ self.assertEqual(res[2], 0)
+
+ res = _normalizeTabularData([], "keys", "default")
+ self.assertEqual(len(res[0]), 0)
+ self.assertEqual(len(res[1]), 0)
+ self.assertEqual(res[2], 0)
+
+ res = _normalizeTabularData([], "firstrow", "default")
+ self.assertEqual(len(res[0]), 0)
+ self.assertEqual(len(res[1]), 0)
+ self.assertEqual(res[2], 0)
+
+ @dataclass
+ class row:
+ a: int
+ b: int
+
+ rows = [row(1, 2), row(3, 4)]
+ res = _normalizeTabularData(rows, "keys", "default")
+ self.assertEqual(res[0], [[1, 2], [3, 4]])
+ self.assertEqual(res[1], ["a", "b"])
+ self.assertEqual(res[2], 0)
+
+ res = _normalizeTabularData(rows, ["x", "y"], "default")
+ self.assertEqual(res[0], [[1, 2], [3, 4]])
+ self.assertEqual(res[1], ["x", "y"])
+ self.assertEqual(res[2], 0)
+
+ def test_type(self):
+ """Basic sanity test of internal _type() function."""
+ self.assertEqual(_type(None), type(None))
+ self.assertEqual(_type("foo"), type(""))
+ self.assertEqual(_type("1"), type(1))
+ self.assertEqual(_type("\x1b[31m42\x1b[0m"), type(42))
+ self.assertEqual(_type("\x1b[31m42\x1b[0m"), type(42))
+ self.assertEqual(_type(datetime.now()), type("2024-12-31"))
+
+ def test_visibleWidth(self):
+ """Basic sanity test of internal _visibleWidth() function."""
+ self.assertEqual(_visibleWidth("world"), 5)
+ self.assertEqual(_visibleWidth("\x1b[31mhello\x1b[0m"), 5)
+ self.assertEqual(_visibleWidth(np.ones(3)), 10)
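+        # str(np.ones(3)) renders as "[1. 1. 1.]", which is 10 characters wide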
+
+ def test_wrapTextToColWidths(self):
+ """Basic sanity test of internal _wrapTextToColWidths() function."""
+ res = _wrapTextToColWidths([], [2, 2], True)
+ self.assertEqual(len(res), 0)
+
+ res = _wrapTextToColWidths([[1], [2]], [2, 2], True)
+ self.assertEqual(res[0][0], 1)
+ self.assertEqual(res[1][0], 2)
+
+ res = _wrapTextToColWidths([["1"], ["2"]], [2, 2], False)
+ self.assertEqual(res[0][0], "1")
+ self.assertEqual(res[1][0], "2")
+
+
+class TestTabulateOutput(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.testTable = [["spam", 41.9999], ["eggs", "451.0"]]
+ cls.testTableWithSepLine = [
+ ["spam", 41.9999],
+ SEPARATING_LINE,
+ ["eggs", "451.0"],
+ ]
+ cls.testTableHeaders = ["strings", "numbers"]
+
+ def test_plain(self):
+ """Output: plain with headers."""
+ expected = "\n".join(
+ ["strings numbers", "spam 41.9999", "eggs 451"]
+ )
+ result = tabulate(self.testTable, self.testTableHeaders, tableFmt="plain")
+ self.assertEqual(expected, result)
+
+ def test_plainHeaderless(self):
+ """Output: plain without headers."""
+ expected = "\n".join(["spam 41.9999", "eggs 451"])
+ result = tabulate(self.testTable, tableFmt="plain")
+ self.assertEqual(expected, result)
+
+ def test_plainMultilineHeaderless(self):
+ """Output: plain with multiline cells without headers."""
+ table = [["foo bar\nbaz\nbau", "hello"], ["", "multiline\nworld"]]
+ expected = "\n".join(
+ [
+ "foo bar hello",
+ " baz",
+ " bau",
+ " multiline",
+ " world",
+ ]
+ )
+ result = tabulate(table, strAlign="center", tableFmt="plain")
+ self.assertEqual(expected, result)
+
+ def test_plainMultiline(self):
+ """Output: plain with multiline cells with headers."""
+ table = [[2, "foo\nbar"]]
+ headers = ("more\nspam \x1b[31meggs\x1b[0m", "more spam\n& eggs")
+ expected = "\n".join(
+ [
+ " more more spam",
+ " spam \x1b[31meggs\x1b[0m & eggs",
+ " 2 foo",
+ " bar",
+ ]
+ )
+ result = tabulate(table, headers, tableFmt="plain")
+ self.assertEqual(expected, result)
+
+ def test_plainMultilineWithLinks(self):
+ """Output: plain with multiline cells with links and headers."""
+ table = [[2, "foo\nbar"]]
+ headers = (
+ "more\nspam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\",
+ "more spam\n& eggs",
+ )
+ expected = "\n".join(
+ [
+ " more more spam",
+ " spam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\ & eggs",
+ " 2 foo",
+ " bar",
+ ]
+ )
+ result = tabulate(table, headers, tableFmt="plain")
+ self.assertEqual(expected, result)
+
+ def test_plainMultilineWithEmptyCells(self):
+ """Output: plain with multiline cells and empty cells with headers."""
+ table = [
+ ["hdr", "data", "fold"],
+ ["1", "", ""],
+ ["2", "very long data", "fold\nthis"],
+ ]
+ expected = "\n".join(
+ [
+ " hdr data fold",
+ " 1",
+ " 2 very long data fold",
+ " this",
+ ]
+ )
+ result = tabulate(table, headers="firstrow", tableFmt="plain")
+ self.assertEqual(expected, result)
+
+ def test_plainMultilineWithEmptyCellsHeaderless(self):
+ """Output: plain with multiline cells and empty cells without headers."""
+ table = [["0", "", ""], ["1", "", ""], ["2", "very long data", "fold\nthis"]]
+ expected = "\n".join(
+ ["0", "1", "2 very long data fold", " this"]
+ )
+ result = tabulate(table, tableFmt="plain")
+ self.assertEqual(expected, result)
+
+ def test_plainMaxcolwidthAutowraps(self):
+ """Output: maxcolwidth will result in autowrapping longer cells."""
+ table = [["hdr", "fold"], ["1", "very long data"]]
+ expected = "\n".join([" hdr fold", " 1 very long", " data"])
+ result = tabulate(
+ table, headers="firstrow", tableFmt="plain", maxColWidths=[10, 10]
+ )
+ self.assertEqual(expected, result)
+
+ def test_plainMaxcolwidthAutowrapsWithSep(self):
+ """Output: maxcolwidth will result in autowrapping longer cells and separating line."""
+ table = [
+ ["hdr", "fold"],
+ ["1", "very long data"],
+ SEPARATING_LINE,
+ ["2", "last line"],
+ ]
+ expected = "\n".join(
+ [" hdr fold", " 1 very long", " data", "", " 2 last line"]
+ )
+ result = tabulate(
+ table, headers="firstrow", tableFmt="plain", maxColWidths=[10, 10]
+ )
+ self.assertEqual(expected, result)
+
+ def test_maxColWidthsingleValue(self):
+ """Output: maxcolwidth can be specified as a single number that works for each column."""
+ table = [
+ ["hdr", "fold1", "fold2"],
+ ["mini", "this is short", "this is a bit longer"],
+ ]
+ expected = "\n".join(
+ [
+ "hdr fold1 fold2",
+ "mini this this",
+ " is is a",
+ " short bit",
+ " longer",
+ ]
+ )
+ result = tabulate(table, headers="firstrow", tableFmt="plain", maxColWidths=6)
+ self.assertEqual(expected, result)
+
+ def test_maxcolwidthPadTailingWidths(self):
+ """Output: maxcolwidth, if only partly specified, pads tailing cols with None."""
+ table = [
+ ["hdr", "fold1", "fold2"],
+ ["mini", "this is short", "this is a bit longer"],
+ ]
+ expected = "\n".join(
+ [
+ "hdr fold1 fold2",
+ "mini this this is a bit longer",
+ " is",
+ " short",
+ ]
+ )
+ result = tabulate(
+ table, headers="firstrow", tableFmt="plain", maxColWidths=[None, 6]
+ )
+ self.assertEqual(expected, result)
+
+ def test_maxcolwidthHonorDisableParsenum(self):
+ """Output: Using maxcolwidth in conjunction with disable_parsenum is honored."""
+ table = [
+ ["first number", 123.456789, "123.456789"],
+ ["second number", "987654321.123", "987654321.123"],
+ ]
+ expected = "\n".join(
+ [
+ "+--------+---------------+--------+",
+ "| first | 123.457 | 123.45 |",
+ "| number | | 6789 |",
+ "+--------+---------------+--------+",
+ "| second | 9.87654e+08 | 987654 |",
+ "| number | | 321.12 |",
+ "| | | 3 |",
+ "+--------+---------------+--------+",
+ ]
+ )
+ # Grid makes showing the alignment difference a little easier
+ result = tabulate(table, tableFmt="grid", maxColWidths=6, disableNumParse=[2])
+ self.assertEqual(expected, result)
+
+    def test_plainMaxHeaderColWidthsAutowraps(self):
+        """Output: maxHeaderColWidths will result in autowrapping header cells."""
+ table = [["hdr", "fold"], ["1", "very long data"]]
+ expected = "\n".join(
+ [" hdr fo", " ld", " 1 very long", " data"]
+ )
+ result = tabulate(
+ table,
+ headers="firstrow",
+ tableFmt="plain",
+ maxColWidths=[10, 10],
+ maxHeaderColWidths=[None, 2],
+ )
+ self.assertEqual(expected, result)
+
+ def test_simple(self):
+ """Output: simple with headers."""
+ expected = "\n".join(
+ [
+ "strings numbers",
+ "--------- ---------",
+ "spam 41.9999",
+ "eggs 451",
+ ]
+ )
+ result = tabulate(self.testTable, self.testTableHeaders, tableFmt="simple")
+ self.assertEqual(expected, result)
+
+ def test_simpleWithSepLine(self):
+ """Output: simple with headers and separating line."""
+ expected = "\n".join(
+ [
+ "strings numbers",
+ "--------- ---------",
+ "spam 41.9999",
+ "--------- ---------",
+ "eggs 451",
+ ]
+ )
+ result = tabulate(
+ self.testTableWithSepLine, self.testTableHeaders, tableFmt="simple"
+ )
+ self.assertEqual(expected, result)
+
+ def test_readmeExampleWithSep(self):
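+        """Output: simple with a separating line (README example)."""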
+ table = [["Earth", 6371], ["Mars", 3390], SEPARATING_LINE, ["Moon", 1737]]
+ expected = "\n".join(
+ [
+ "----- ----",
+ "Earth 6371",
+ "Mars 3390",
+ "----- ----",
+ "Moon 1737",
+ "----- ----",
+ ]
+ )
+ result = tabulate(table, tableFmt="simple")
+ self.assertEqual(expected, result)
+
+ def test_simpleMultiline2(self):
+ """Output: simple with multiline cells."""
+ expected = "\n".join(
+ [
+ " key value",
+ "----- ---------",
+ " foo bar",
+ "spam multiline",
+ " world",
+ ]
+ )
+ table = [["key", "value"], ["foo", "bar"], ["spam", "multiline\nworld"]]
+ result = tabulate(
+ table, headers="firstrow", strAlign="center", tableFmt="simple"
+ )
+ self.assertEqual(expected, result)
+
+ def test_simpleMultiline2WithSepLine(self):
+ """Output: simple with multiline cells."""
+ expected = "\n".join(
+ [
+ " key value",
+ "----- ---------",
+ " foo bar",
+ "----- ---------",
+ "spam multiline",
+ " world",
+ ]
+ )
+ table = [
+ ["key", "value"],
+ ["foo", "bar"],
+ SEPARATING_LINE,
+ ["spam", "multiline\nworld"],
+ ]
+ result = tabulate(
+ table, headers="firstrow", strAlign="center", tableFmt="simple"
+ )
+ self.assertEqual(expected, result)
+
+ def test_simpleHeaderless(self):
+ """Output: simple without headers."""
+ expected = "\n".join(
+ ["---- --------", "spam 41.9999", "eggs 451", "---- --------"]
+ )
+ result = tabulate(self.testTable, tableFmt="simple")
+ self.assertEqual(expected, result)
+
+ def test_simpleHeaderlessWithSepLine(self):
+ """Output: simple without headers."""
+ expected = "\n".join(
+ [
+ "---- --------",
+ "spam 41.9999",
+ "---- --------",
+ "eggs 451",
+ "---- --------",
+ ]
+ )
+ result = tabulate(self.testTableWithSepLine, tableFmt="simple")
+ self.assertEqual(expected, result)
+
+ def test_simpleMultilineHeaderless(self):
+ """Output: simple with multiline cells without headers."""
+ table = [["foo bar\nbaz\nbau", "hello"], ["", "multiline\nworld"]]
+ expected = "\n".join(
+ [
+ "------- ---------",
+ "foo bar hello",
+ " baz",
+ " bau",
+ " multiline",
+ " world",
+ "------- ---------",
+ ]
+ )
+ result = tabulate(table, strAlign="center", tableFmt="simple")
+ self.assertEqual(expected, result)
+
+ def test_simpleMultiline(self):
+ """Output: simple with multiline cells with headers."""
+ table = [[2, "foo\nbar"]]
+ headers = ("more\nspam \x1b[31meggs\x1b[0m", "more spam\n& eggs")
+ expected = "\n".join(
+ [
+ " more more spam",
+ " spam \x1b[31meggs\x1b[0m & eggs",
+ "----------- -----------",
+ " 2 foo",
+ " bar",
+ ]
+ )
+ result = tabulate(table, headers, tableFmt="simple")
+ self.assertEqual(expected, result)
+
+ def test_simpleMultilineWithLinks(self):
+ """Output: simple with multiline cells with links and headers."""
+ table = [[2, "foo\nbar"]]
+ headers = (
+ "more\nspam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\",
+ "more spam\n& eggs",
+ )
+ expected = "\n".join(
+ [
+ " more more spam",
+ " spam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\ & eggs",
+ "----------- -----------",
+ " 2 foo",
+ " bar",
+ ]
+ )
+ result = tabulate(table, headers, tableFmt="simple")
+ self.assertEqual(expected, result)
+
+ def test_simpleMultilineWithEmptyCells(self):
+ """Output: simple with multiline cells and empty cells with headers."""
+ table = [
+ ["hdr", "data", "fold"],
+ ["1", "", ""],
+ ["2", "very long data", "fold\nthis"],
+ ]
+ expected = "\n".join(
+ [
+ " hdr data fold",
+ "----- -------------- ------",
+ " 1",
+ " 2 very long data fold",
+ " this",
+ ]
+ )
+ result = tabulate(table, headers="firstrow", tableFmt="simple")
+ self.assertEqual(expected, result)
+
+ def test_simpleMultilineWithEmptyCellsHeaderless(self):
+ """Output: simple with multiline cells and empty cells without headers."""
+ table = [["0", "", ""], ["1", "", ""], ["2", "very long data", "fold\nthis"]]
+ expected = "\n".join(
+ [
+ "- -------------- ----",
+ "0",
+ "1",
+ "2 very long data fold",
+ " this",
+ "- -------------- ----",
+ ]
+ )
+ result = tabulate(table, tableFmt="simple")
+ self.assertEqual(expected, result)
+
+ def test_github(self):
+ """Output: github with headers."""
+ expected = "\n".join(
+ [
+ "| strings | numbers |",
+ "|-----------|-----------|",
+ "| spam | 41.9999 |",
+ "| eggs | 451 |",
+ ]
+ )
+ result = tabulate(self.testTable, self.testTableHeaders, tableFmt="github")
+ self.assertEqual(expected, result)
+
+ def test_grid(self):
+ """Output: grid with headers."""
+ expected = "\n".join(
+ [
+ "+-----------+-----------+",
+ "| strings | numbers |",
+ "+===========+===========+",
+ "| spam | 41.9999 |",
+ "+-----------+-----------+",
+ "| eggs | 451 |",
+ "+-----------+-----------+",
+ ]
+ )
+ result = tabulate(self.testTable, self.testTableHeaders, tableFmt="grid")
+ self.assertEqual(expected, result)
+
+ def test_gridHeaderless(self):
+ """Output: grid without headers."""
+ expected = "\n".join(
+ [
+ "+------+----------+",
+ "| spam | 41.9999 |",
+ "+------+----------+",
+ "| eggs | 451 |",
+ "+------+----------+",
+ ]
+ )
+ result = tabulate(self.testTable, tableFmt="grid")
+ self.assertEqual(expected, result)
+
+ def test_gridMultilineHeaderless(self):
+ """Output: grid with multiline cells without headers."""
+ table = [["foo bar\nbaz\nbau", "hello"], ["", "multiline\nworld"]]
+ expected = "\n".join(
+ [
+ "+---------+-----------+",
+ "| foo bar | hello |",
+ "| baz | |",
+ "| bau | |",
+ "+---------+-----------+",
+ "| | multiline |",
+ "| | world |",
+ "+---------+-----------+",
+ ]
+ )
+ result = tabulate(table, strAlign="center", tableFmt="grid")
+ self.assertEqual(expected, result)
+
+ def test_gridMultiline(self):
+ """Output: grid with multiline cells with headers."""
+ table = [[2, "foo\nbar"]]
+ headers = ("more\nspam \x1b[31meggs\x1b[0m", "more spam\n& eggs")
+ expected = "\n".join(
+ [
+ "+-------------+-------------+",
+ "| more | more spam |",
+ "| spam \x1b[31meggs\x1b[0m | & eggs |",
+ "+=============+=============+",
+ "| 2 | foo |",
+ "| | bar |",
+ "+-------------+-------------+",
+ ]
+ )
+ result = tabulate(table, headers, tableFmt="grid")
+ self.assertEqual(expected, result)
+
+ def test_gridMultilineWithEmptyCells(self):
+ """Output: grid with multiline cells and empty cells with headers."""
+ table = [
+ ["hdr", "data", "fold"],
+ ["1", "", ""],
+ ["2", "very long data", "fold\nthis"],
+ ]
+ expected = "\n".join(
+ [
+ "+-------+----------------+--------+",
+ "| hdr | data | fold |",
+ "+=======+================+========+",
+ "| 1 | | |",
+ "+-------+----------------+--------+",
+ "| 2 | very long data | fold |",
+ "| | | this |",
+ "+-------+----------------+--------+",
+ ]
+ )
+ result = tabulate(table, headers="firstrow", tableFmt="grid")
+ self.assertEqual(expected, result)
+
+ def test_gridMultilineWithEmptyCellsHeaderless(self):
+ """Output: grid with multiline cells and empty cells without headers."""
+ table = [["0", "", ""], ["1", "", ""], ["2", "very long data", "fold\nthis"]]
+ expected = "\n".join(
+ [
+ "+---+----------------+------+",
+ "| 0 | | |",
+ "+---+----------------+------+",
+ "| 1 | | |",
+ "+---+----------------+------+",
+ "| 2 | very long data | fold |",
+ "| | | this |",
+ "+---+----------------+------+",
+ ]
+ )
+ result = tabulate(table, tableFmt="grid")
+ self.assertEqual(expected, result)
+
+ def test_pretty(self):
+ """Output: pretty with headers."""
+ expected = "\n".join(
+ [
+ "+---------+---------+",
+ "| strings | numbers |",
+ "+---------+---------+",
+ "| spam | 41.9999 |",
+ "| eggs | 451.0 |",
+ "+---------+---------+",
+ ]
+ )
+ result = tabulate(self.testTable, self.testTableHeaders, tableFmt="pretty")
+ self.assertEqual(expected, result)
+
+ def test_prettyHeaderless(self):
+ """Output: pretty without headers."""
+ expected = "\n".join(
+ [
+ "+------+---------+",
+ "| spam | 41.9999 |",
+ "| eggs | 451.0 |",
+ "+------+---------+",
+ ]
+ )
+ result = tabulate(self.testTable, tableFmt="pretty")
+ self.assertEqual(expected, result)
+
+ def test_prettyMultilineHeaderless(self):
+ """Output: pretty with multiline cells without headers."""
+ table = [["foo bar\nbaz\nbau", "hello"], ["", "multiline\nworld"]]
+ expected = "\n".join(
+ [
+ "+---------+-----------+",
+ "| foo bar | hello |",
+ "| baz | |",
+ "| bau | |",
+ "| | multiline |",
+ "| | world |",
+ "+---------+-----------+",
+ ]
+ )
+ result = tabulate(table, tableFmt="pretty")
+ self.assertEqual(expected, result)
+
+ def test_prettyMultiline(self):
+ """Output: pretty with multiline cells with headers."""
+ table = [[2, "foo\nbar"]]
+ headers = ("more\nspam \x1b[31meggs\x1b[0m", "more spam\n& eggs")
+ expected = "\n".join(
+ [
+ "+-----------+-----------+",
+ "| more | more spam |",
+ "| spam \x1b[31meggs\x1b[0m | & eggs |",
+ "+-----------+-----------+",
+ "| 2 | foo |",
+ "| | bar |",
+ "+-----------+-----------+",
+ ]
+ )
+ result = tabulate(table, headers, tableFmt="pretty")
+ self.assertEqual(expected, result)
+
+ def test_prettyMultilineWithLinks(self):
+ """Output: pretty with multiline cells with headers."""
+ table = [[2, "foo\nbar"]]
+ headers = (
+ "more\nspam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\",
+ "more spam\n& eggs",
+ )
+ expected = "\n".join(
+ [
+ "+-----------+-----------+",
+ "| more | more spam |",
+ "| spam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\ | & eggs |",
+ "+-----------+-----------+",
+ "| 2 | foo |",
+ "| | bar |",
+ "+-----------+-----------+",
+ ]
+ )
+ result = tabulate(table, headers, tableFmt="pretty")
+ self.assertEqual(expected, result)
+
+ def test_prettyMultilineWithEmptyCells(self):
+ """Output: pretty with multiline cells and empty cells with headers."""
+ table = [
+ ["hdr", "data", "fold"],
+ ["1", "", ""],
+ ["2", "very long data", "fold\nthis"],
+ ]
+ expected = "\n".join(
+ [
+ "+-----+----------------+------+",
+ "| hdr | data | fold |",
+ "+-----+----------------+------+",
+ "| 1 | | |",
+ "| 2 | very long data | fold |",
+ "| | | this |",
+ "+-----+----------------+------+",
+ ]
+ )
+ result = tabulate(table, headers="firstrow", tableFmt="pretty")
+ self.assertEqual(expected, result)
+
+ def test_prettyMultilineWithEmptyCellsHeaderless(self):
+ """Output: pretty with multiline cells and empty cells without headers."""
+ table = [["0", "", ""], ["1", "", ""], ["2", "very long data", "fold\nthis"]]
+ expected = "\n".join(
+ [
+ "+---+----------------+------+",
+ "| 0 | | |",
+ "| 1 | | |",
+ "| 2 | very long data | fold |",
+ "| | | this |",
+ "+---+----------------+------+",
+ ]
+ )
+ result = tabulate(table, tableFmt="pretty")
+ self.assertEqual(expected, result)
+
+ def test_rst(self):
+ """Output: rst with headers."""
+ expected = "\n".join(
+ [
+ "========= =========",
+ "strings numbers",
+ "========= =========",
+ "spam 41.9999",
+ "eggs 451",
+ "========= =========",
+ ]
+ )
+ result = tabulate(self.testTable, self.testTableHeaders, tableFmt="rst")
+ self.assertEqual(expected, result)
+
+ def test_rstWithEmptyValuesInFirstColumn(self):
+ """Output: rst with dots in first column."""
+ test_headers = ["", "what"]
+ test_data = [("", "spam"), ("", "eggs")]
+ expected = "\n".join(
+ [
+ "==== ======",
+ ".. what",
+ "==== ======",
+ ".. spam",
+ ".. eggs",
+ "==== ======",
+ ]
+ )
+ result = tabulate(test_data, test_headers, tableFmt="rst")
+ self.assertEqual(expected, result)
+
+ def test_rstHeaderless(self):
+ """Output: rst without headers."""
+ expected = "\n".join(
+ ["==== ========", "spam 41.9999", "eggs 451", "==== ========"]
+ )
+ result = tabulate(self.testTable, tableFmt="rst")
+ self.assertEqual(expected, result)
+
+ def test_rstMultiline(self):
+ """Output: rst with multiline cells with headers."""
+ table = [[2, "foo\nbar"]]
+ headers = ("more\nspam \x1b[31meggs\x1b[0m", "more spam\n& eggs")
+ expected = "\n".join(
+ [
+ "=========== ===========",
+ " more more spam",
+ " spam \x1b[31meggs\x1b[0m & eggs",
+ "=========== ===========",
+ " 2 foo",
+ " bar",
+ "=========== ===========",
+ ]
+ )
+ result = tabulate(table, headers, tableFmt="rst")
+ self.assertEqual(expected, result)
+
+ def test_rstMultilineWithLinks(self):
+ """Output: rst with multiline cells with headers."""
+ table = [[2, "foo\nbar"]]
+ headers = (
+ "more\nspam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\",
+ "more spam\n& eggs",
+ )
+ expected = "\n".join(
+ [
+ "=========== ===========",
+ " more more spam",
+ " spam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\ & eggs",
+ "=========== ===========",
+ " 2 foo",
+ " bar",
+ "=========== ===========",
+ ]
+ )
+ result = tabulate(table, headers, tableFmt="rst")
+ self.assertEqual(expected, result)
+
+ def test_rstMultilineWithEmptyCells(self):
+ """Output: rst with multiline cells and empty cells with headers."""
+ table = [
+ ["hdr", "data", "fold"],
+ ["1", "", ""],
+ ["2", "very long data", "fold\nthis"],
+ ]
+ expected = "\n".join(
+ [
+ "===== ============== ======",
+ " hdr data fold",
+ "===== ============== ======",
+ " 1",
+ " 2 very long data fold",
+ " this",
+ "===== ============== ======",
+ ]
+ )
+ result = tabulate(table, headers="firstrow", tableFmt="rst")
+ self.assertEqual(expected, result)
+
+ def test_rstMultilineWithEmptyCellsHeaderless(self):
+ """Output: rst with multiline cells and empty cells without headers."""
+ table = [["0", "", ""], ["1", "", ""], ["2", "very long data", "fold\nthis"]]
+ expected = "\n".join(
+ [
+ "= ============== ====",
+ "0",
+ "1",
+ "2 very long data fold",
+ " this",
+ "= ============== ====",
+ ]
+ )
+ result = tabulate(table, tableFmt="rst")
+ self.assertEqual(expected, result)
+
+ def test_noData(self):
+ """Output: table with no data."""
+ expected = "\n".join(["strings numbers", "--------- ---------"])
+ result = tabulate(None, self.testTableHeaders, tableFmt="simple")
+ self.assertEqual(expected, result)
+
+ def test_emptyData(self):
+ """Output: table with empty data."""
+ expected = "\n".join(["strings numbers", "--------- ---------"])
+ result = tabulate([], self.testTableHeaders, tableFmt="simple")
+ self.assertEqual(expected, result)
+
+ def test_noDataWithoutHeaders(self):
+ """Output: table with no data and no headers."""
+ expected = ""
+ result = tabulate(None, tableFmt="simple")
+ self.assertEqual(expected, result)
+
+ def test_emptyDataWithoutHeaders(self):
+ """Output: table with empty data and no headers."""
+ expected = ""
+ result = tabulate([], tableFmt="simple")
+ self.assertEqual(expected, result)
+
+ def test_intFmt(self):
+ """Output: integer format."""
+ result = tabulate([[10000], [10]], intFmt=",", tableFmt="plain")
+ expected = "10,000\n 10"
+ self.assertEqual(expected, result)
+
+ def test_emptyDataWithHeaders(self):
+ """Output: table with empty data and headers as firstrow."""
+ expected = ""
+ result = tabulate([], headers="firstrow")
+ self.assertEqual(expected, result)
+
+ def test_floatFmt(self):
+ """Output: floating point format."""
+ result = tabulate([["1.23456789"], [1.0]], floatFmt=".3f", tableFmt="plain")
+ expected = "1.235\n1.000"
+ self.assertEqual(expected, result)
+
+ def test_floatFmtMulti(self):
+ """Output: floating point format different for each column."""
+ result = tabulate(
+ [[0.12345, 0.12345, 0.12345]], floatFmt=(".1f", ".3f"), tableFmt="plain"
+ )
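+        # only two formats for three columns: the last column keeps the
+        # default float rendering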
+ expected = "0.1 0.123 0.12345"
+ self.assertEqual(expected, result)
+
+ def test_colAlignMulti(self):
+ """Output: string columns with custom colAlign."""
+ result = tabulate(
+ [["one", "two"], ["three", "four"]], colAlign=("right",), tableFmt="plain"
+ )
+ expected = " one two\nthree four"
+ self.assertEqual(expected, result)
+
+ def test_colAlignMultiWithSepLine(self):
+ """Output: string columns with custom colAlign."""
+ result = tabulate(
+ [["one", "two"], SEPARATING_LINE, ["three", "four"]],
+ colAlign=("right",),
+ tableFmt="plain",
+ )
+ expected = " one two\n\nthree four"
+ self.assertEqual(expected, result)
+
+ def test_columnGlobalAndSpecificAlignment(self):
+ """Test `colGlobalAlign` and `"global"` parameter for `colAlign`."""
+ table = [[1, 2, 3, 4], [111, 222, 333, 444]]
+ colGlobalAlign = "center"
+ colAlign = ("global", "left", "right")
+ result = tabulate(table, colGlobalAlign=colGlobalAlign, colAlign=colAlign)
+ expected = "\n".join(
+ [
+ "--- --- --- ---",
+ " 1 2 3 4",
+ "111 222 333 444",
+ "--- --- --- ---",
+ ]
+ )
+ self.assertEqual(expected, result)
+
+ def test_headersGlobalAndSpecificAlignment(self):
+ """Test `headersGlobalAlign` and `headersAlign`."""
+ table = [[1, 2, 3, 4, 5, 6], [111, 222, 333, 444, 555, 666]]
+ colGlobalAlign = "center"
+ colAlign = ("left",)
+ headers = ["h", "e", "a", "d", "e", "r"]
+ headersGlobalAlign = "right"
+ headersAlign = ("same", "same", "left", "global", "center")
+ result = tabulate(
+ table,
+ headers=headers,
+ colGlobalAlign=colGlobalAlign,
+ colAlign=colAlign,
+ headersGlobalAlign=headersGlobalAlign,
+ headersAlign=headersAlign,
+ )
+ expected = "\n".join(
+ [
+ "h e a d e r",
+ "--- --- --- --- --- ---",
+ "1 2 3 4 5 6",
+ "111 222 333 444 555 666",
+ ]
+ )
+ self.assertEqual(expected, result)
+
+ def test_colAlignOrheadersAlignTooLong(self):
+ """Test `colAlign` and `headersAlign` too long."""
+ table = [[1, 2], [111, 222]]
+ colAlign = ("global", "left", "center")
+ headers = ["h"]
+ headersAlign = ("center", "right", "same")
+ result = tabulate(
+ table, headers=headers, colAlign=colAlign, headersAlign=headersAlign
+ )
+ expected = "\n".join([" h", "--- ---", " 1 2", "111 222"])
+ self.assertEqual(expected, result)
+
+ def test_floatConversions(self):
+ """Output: float format parsed."""
+ test_headers = [
+ "str",
+ "bad_float",
+ "just_float",
+ "with_inf",
+ "with_nan",
+ "neg_inf",
+ ]
+ testTable = [
+ ["spam", 41.9999, "123.345", "12.2", "nan", "0.123123"],
+ ["eggs", "451.0", 66.2222, "inf", 123.1234, "-inf"],
+ ["asd", "437e6548", 1.234e2, float("inf"), float("nan"), 0.22e23],
+ ]
+ result = tabulate(testTable, test_headers, tableFmt="grid")
+ expected = "\n".join(
+ [
+ "+-------+-------------+--------------+------------+------------+-------------+",
+ "| str | bad_float | just_float | with_inf | with_nan | neg_inf |",
+ "+=======+=============+==============+============+============+=============+",
+ "| spam | 41.9999 | 123.345 | 12.2 | nan | 0.123123 |",
+ "+-------+-------------+--------------+------------+------------+-------------+",
+ "| eggs | 451.0 | 66.2222 | inf | 123.123 | -inf |",
+ "+-------+-------------+--------------+------------+------------+-------------+",
+ "| asd | 437e6548 | 123.4 | inf | nan | 2.2e+22 |",
+ "+-------+-------------+--------------+------------+------------+-------------+",
+ ]
+ )
+ self.assertEqual(expected, result)
+
+ def test_missingVal(self):
+ """Output: substitution of missing values."""
+ result = tabulate(
+ [["Alice", 10], ["Bob", None]], missingVal="n/a", tableFmt="plain"
+ )
+ expected = "Alice 10\nBob n/a"
+ self.assertEqual(expected, result)
+
+ def test_missingValMulti(self):
+ """Output: substitution of missing values with different values per column."""
+ result = tabulate(
+ [["Alice", "Bob", "Charlie"], [None, None, None]],
+ missingVal=("n/a", "?"),
+ tableFmt="plain",
+ )
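+        # with fewer missingVal entries than columns, the remaining columns
+        # fall back to the default replacement (rendered empty below)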
+ expected = "Alice Bob Charlie\nn/a ?"
+ self.assertEqual(expected, result)
+
+ def test_columnAlignment(self):
+ """Output: custom alignment for text and numbers."""
+ expected = "\n".join(["----- ---", "Alice 1", " Bob 333", "----- ---"])
+ result = tabulate(
+ [["Alice", 1], ["Bob", 333]], strAlign="right", numAlign="center"
+ )
+ self.assertEqual(expected, result)
+
+ def test_dictLikeWithIndex(self):
+ """Output: a table with a running index."""
+ dd = {"b": range(101, 104)}
+ expected = "\n".join([" b", "-- ---", " 0 101", " 1 102", " 2 103"])
+ result = tabulate(dd, "keys", showIndex=True)
+ self.assertEqual(expected, result)
+
+ def test_listOfListsWithIndex(self):
+ """Output: a table with a running index."""
+ dd = zip(*[range(3), range(101, 104)])
+ expected = "\n".join(
+ [
+ " a b",
+ "-- --- ---",
+ " 0 0 101",
+ " 1 1 102",
+ " 2 2 103",
+ ]
+ )
+ result = tabulate(dd, headers=["a", "b"], showIndex=True)
+ self.assertEqual(expected, result)
+
+ def test_listOfListsWithIndexWithSepLine(self):
+ """Output: a table with a running index."""
+ dd = [(0, 101), SEPARATING_LINE, (1, 102), (2, 103)]
+ expected = "\n".join(
+ [
+ " a b",
+ "-- --- ---",
+ " 0 0 101",
+ "-- --- ---",
+ " 1 1 102",
+ " 2 2 103",
+ ]
+ )
+ result = tabulate(dd, headers=["a", "b"], showIndex=True)
+ self.assertEqual(expected, result)
+
+ def test_listOfListsWithSuppliedIndex(self):
+ """Output: a table with a supplied index."""
+ dd = zip(*[list(range(3)), list(range(101, 104))])
+ expected = "\n".join(
+ [
+ " a b",
+ "-- --- ---",
+ " 1 0 101",
+ " 2 1 102",
+ " 3 2 103",
+ ]
+ )
+ result = tabulate(dd, headers=["a", "b"], showIndex=[1, 2, 3])
+ self.assertEqual(expected, result)
+ # the index must be as long as the number of rows
+ with self.assertRaises(ValueError):
+ tabulate(dd, headers=["a", "b"], showIndex=[1, 2])
+
+ def test_listOfListsWithIndexFirstrow(self):
+ """Output: a table with a running index and header='firstrow'."""
+ dd = zip(*[["a"] + list(range(3)), ["b"] + list(range(101, 104))])
+ expected = "\n".join(
+ [
+ " a b",
+ "-- --- ---",
+ " 0 0 101",
+ " 1 1 102",
+ " 2 2 103",
+ ]
+ )
+ result = tabulate(dd, headers="firstrow", showIndex=True)
+ self.assertEqual(expected, result)
+ # the index must be as long as the number of rows
+ with self.assertRaises(ValueError):
+ tabulate(dd, headers="firstrow", showIndex=[1, 2])
+
+ def test_disableNumParseDefault(self):
+ """Output: Default table output with number parsing and alignment."""
+ expected = "\n".join(
+ [
+ "strings numbers",
+ "--------- ---------",
+ "spam 41.9999",
+ "eggs 451",
+ ]
+ )
+ result = tabulate(self.testTable, self.testTableHeaders)
+ self.assertEqual(expected, result)
+ result = tabulate(self.testTable, self.testTableHeaders, disableNumParse=False)
+ self.assertEqual(expected, result)
+
+ def test_disableNumParseTrue(self):
+ """Output: Default table output, but without number parsing and alignment."""
+ expected = "\n".join(
+ [
+ "strings numbers",
+ "--------- ---------",
+ "spam 41.9999",
+ "eggs 451.0",
+ ]
+ )
+ result = tabulate(self.testTable, self.testTableHeaders, disableNumParse=True)
+ self.assertEqual(expected, result)
+
+ def test_disableNumParseList(self):
+ """Output: Default table output, but with number parsing selectively disabled."""
+ tableHeaders = ["h1", "h2", "h3"]
+ testTable = [["foo", "bar", "42992e1"]]
+ expected = "\n".join(
+ ["h1 h2 h3", "---- ---- -------", "foo bar 42992e1"]
+ )
+ result = tabulate(testTable, tableHeaders, disableNumParse=[2])
+ self.assertEqual(expected, result)
+
+ expected = "\n".join(
+ ["h1 h2 h3", "---- ---- ------", "foo bar 429920"]
+ )
+ result = tabulate(testTable, tableHeaders, disableNumParse=[0, 1])
+ self.assertEqual(expected, result)
diff --git a/armi/utils/tests/test_utils.py b/armi/utils/tests/test_utils.py
index c2131c308..826a155ef 100644
--- a/armi/utils/tests/test_utils.py
+++ b/armi/utils/tests/test_utils.py
@@ -14,6 +14,7 @@
"""Testing some utility functions."""
from collections import defaultdict
+import os
import unittest
import numpy as np
@@ -21,6 +22,7 @@
from armi import utils
from armi.reactor.tests.test_reactors import loadTestReactor
from armi.settings.caseSettings import Settings
+from armi.tests import mockRunLogs
from armi.utils import (
directoryChangers,
getPowerFractions,
@@ -37,6 +39,7 @@
getCumulativeNodeNum,
hasBurnup,
codeTiming,
+ safeCopy,
)
@@ -140,7 +143,9 @@ def test_plotMatrix(self):
def test_classesInHierarchy(self):
"""Tests the classesInHierarchy utility."""
# load the test reactor
- _o, r = loadTestReactor()
+ _o, r = loadTestReactor(
+ inputFileName="smallestTestReactor/armiRunSmallest.yaml"
+ )
# call the `classesInHierarchy` function
classCounts = defaultdict(lambda: 0)
@@ -152,8 +157,8 @@ def test_classesInHierarchy(self):
self.assertEqual(classCounts[type(r.core)], 1)
 # further validate the Reactor hierarchy is in place
- self.assertGreater(len(r.core.getAssemblies()), 50)
- self.assertGreater(len(r.core.getBlocks()), 200)
+ self.assertEqual(len(r.core.getAssemblies()), 1)
+ self.assertEqual(len(r.core.getBlocks()), 1)
def test_codeTiming(self):
"""Test that codeTiming preserves function attributes when it wraps a function."""
@@ -166,6 +171,33 @@ def testFunc():
self.assertEqual(getattr(testFunc, "__doc__"), "Test function docstring.")
self.assertEqual(getattr(testFunc, "__name__"), "testFunc")
+ def test_safeCopy(self):
+ with directoryChangers.TemporaryDirectoryChanger():
+ os.mkdir("dir1")
+ os.mkdir("dir2")
+ file1 = "dir1/file1.txt"
+ with open(file1, "w") as f:
+ f.write("Hello")
+ file2 = "dir1\\file2.txt"
+ with open(file2, "w") as f:
+ f.write("Hello2")
+
+ with mockRunLogs.BufferLog() as mock:
+ # Test Linuxy file path
+ self.assertEqual("", mock.getStdout())
+ safeCopy(file1, "dir2")
+ self.assertIn("Copied", mock.getStdout())
+ self.assertIn("file1", mock.getStdout())
+ self.assertIn("->", mock.getStdout())
+ # Clean up for next safeCopy
+ mock.emptyStdout()
+ # Test Windowsy file path
+ self.assertEqual("", mock.getStdout())
+ safeCopy(file2, "dir2")
+ self.assertIn("Copied", mock.getStdout())
+ self.assertIn("file2", mock.getStdout())
+ self.assertIn("->", mock.getStdout())
+
class CyclesSettingsTests(unittest.TestCase):
"""
diff --git a/armi/utils/textProcessors.py b/armi/utils/textProcessors.py
index de3760627..fcb6e1431 100644
--- a/armi/utils/textProcessors.py
+++ b/armi/utils/textProcessors.py
@@ -379,7 +379,7 @@ def __exit__(self, exc_type, exc_value, traceback):
if self._stream is not None:
try:
self._stream.close()
- except: # noqa: bare-except
+ except Exception:
# We really don't care if anything fails here, plus an exception in exit is ignored anyway
pass
self._stream = None
diff --git a/armi/resources/images/TerraPowerLogo.png b/doc/.static/TerraPowerLogo.png
similarity index 100%
rename from armi/resources/images/TerraPowerLogo.png
rename to doc/.static/TerraPowerLogo.png
diff --git a/armi/resources/images/TerraPowerLogo_Large.png b/doc/.static/TerraPowerLogo_Large.png
similarity index 100%
rename from armi/resources/images/TerraPowerLogo_Large.png
rename to doc/.static/TerraPowerLogo_Large.png
diff --git a/doc/.static/dochelpers.py b/doc/.static/dochelpers.py
index 217d5fa3f..72713f511 100644
--- a/doc/.static/dochelpers.py
+++ b/doc/.static/dochelpers.py
@@ -14,7 +14,30 @@
"""Helpers for Sphinx documentation."""
-def createTable(rst_table, caption=None, align=None, widths=None, width=None):
+def escapeSpecialCharacters(s):
+    """Escape RST special characters in the input string.
+
+    Special characters include: ``*|_``. More may be added as they prove troublesome.
+
+ Parameters
+ ----------
+ s : str
+ String with characters to be escaped.
+
+ Returns
+ -------
+ str
+ Input string with special characters escaped.
+ """
+ news = s[:]
+ for char in ["*", "|", "_"]:
+ news = news.replace(char, "\\" + char)
+ return news
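+
+# Illustrative only (hypothetical call, not part of the patch):
+#   escapeSpecialCharacters("a_b*c") -> "a\_b\*c"
+# The escaped string then renders literally in RST instead of being parsed as markup.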
+
+
+def createTable(
+ rst_table, caption=None, label=None, align=None, widths=None, width=None
+):
"""
This method is available within ``.. exec::``. It allows someone to create a table with a
caption.
@@ -22,6 +45,8 @@ def createTable(rst_table, caption=None, align=None, widths=None, width=None):
The ``rst_table``
"""
rst = [".. table:: {}".format(caption or "")]
+ if label:
+ rst += [" :name: {}".format(label)]
if align:
rst += [" :align: {}".format(align)]
if width:
@@ -136,8 +161,8 @@ def generateParamTable(klass, fwParams, app=None):
pluginContent = headerContent.format(srcName)
for pd in pdefs:
pluginContent += f""" * - {pd.name}
- - {pd.description}
- - {pd.units}
+ - {escapeSpecialCharacters(str(pd.description))}
+ - {escapeSpecialCharacters(pd.units)}
"""
content.append(pluginContent + "\n")
diff --git a/doc/.static/gridEditor.png b/doc/.static/gridEditor.png
new file mode 100644
index 000000000..e9bfcb486
Binary files /dev/null and b/doc/.static/gridEditor.png differ
diff --git a/doc/conf.py b/doc/conf.py
index d65645502..2275edc1a 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -224,7 +224,7 @@ def autodoc_skip_member_handler(app, what, name, obj, skip, options):
s = str(obj).strip()
 if s.startswith("
-The ARMI tests are meant to be run using `tox `_::
+The ARMI tests are meant to be run locally using `pytest `_::
- $ pip install tox
- $ tox -- -n 6
-
-This runs the unit tests in parallel on 6 processes. Omit the ``-n 6`` argument
-to run on a single process.
-
-Or the tests can also be run using ``pytest`` directly::
-
- $ pip intall -e .[test]
+ $ pip install -e .[test]
$ pytest -n 4 armi
Submitting Changes
diff --git a/doc/developer/standards_and_practices.rst b/doc/developer/standards_and_practices.rst
index 4d099db0a..7b8641802 100644
--- a/doc/developer/standards_and_practices.rst
+++ b/doc/developer/standards_and_practices.rst
@@ -32,7 +32,18 @@ it is important to remember to use the ``black`` formatter before pushing any code
to ARMI on github.com will be automatically checked to see if they conform to the ``black`` code formatter standards.
The ``black`` formatter provides 100% consistency in ARMI for: whitespace, line length, trailing commas, and string
-formatting.
+formatting. And it is easy to run on the command line::
+
+ black .
+
+Address the ruff warnings
+=========================
+ARMI also uses the amazing Python linter `ruff `_. Again, any new code you add must have
+zero ``ruff`` warnings or errors.
+
+This is very easy to run on the command line::
+
+ ruff check .
Remove commented-out code
=========================
@@ -80,7 +91,7 @@ functions, and methods and their signatures (the signature includes the paramete
* Variables that you designate as unused should be prefaced with an underscore (``_``).
* Do not use Python `reserved keywords `_ as variable names.
-* Try to use names that are pronounceable. (Well-established variable names from equations are exceptable.)
+* Try to use names that are pronounceable. (Well-established variable names from equations are acceptable.)
* Keep names concise and expressive. (An exception is test method names, which may be longer and more
descriptive.)
* Avoid abbreviations and acronyms, unless they are well understood by subject-matter experts (e.g. DB for database,
@@ -184,7 +195,7 @@ Avoid repeating code
====================
In other words, don't repeat yourself. (`D. R. Y. `_).
Repetitious code is harder to read, and harder for others to update. If you ever find yourself copying and pasting
-code, consider pulling the repeated code out into it's own function, or using a loop.
+code, consider pulling the repeated code out into its own function, or using a loop.
Public methods should have docstrings
=====================================
@@ -233,7 +244,7 @@ Place a single line between each of these groups, for example:
import os
import math
- import numpy
+ import numpy as np
from matplotlib import pyplot
from armi import runLog
@@ -287,22 +298,23 @@ Input files
ARMI developers **shall** use one of the following well-defined, Python-supported, input file formats.
.json
- JSON files are used for a variety of data-object representations. There are some limitations of JSON, in that it
- does not easily support comments. JSON is also very strict.
+ JSON files are used for a variety of data-object representations. There are some limitations of
+ JSON, in that it does not easily support comments. JSON is also very strict.
.yaml
YAML files are like JSON files but can have comments in them.
-Address the ruff warnings
-=========================
-Our pull request system integrates with the automatic code checker, ruff. Any new code you add must have
-zero ruff warnings or errors.
-
General do's and don'ts
=======================
-do not use ``print``
+Do not use ``print``
ARMI code should not use the ``print`` function; use one of the methods within ``armi.runLog``.
Do not add new ``TODO`` statements in your commits and PRs.
- If your new ``TODO`` statement is important, it should be a GitHub Issue. Yes, we have existing ``TODO`` statements in the code, those are relic and need to be removed. Also, never mark the code with ``FIXME`` or ``XXX```; open a ticket.
+    If your new ``TODO`` statement is important, it should be a GitHub Issue. Yes, we have existing
+    ``TODO`` statements in the code; those are relics and need to be removed. Similarly, never mark
+    the code with ``FIXME`` or ``XXX``; open a ticket.
+
+Do not link GitHub tickets or PRs in code.
+    The idea in ARMI is that something is either worth documenting well in a docstring or in the
+    docs, or it is not. Just linking a ticket or PR in a docstring is not helpful.
diff --git a/doc/developer/tooling.rst b/doc/developer/tooling.rst
index a423ea6fc..998eef319 100644
--- a/doc/developer/tooling.rst
+++ b/doc/developer/tooling.rst
@@ -113,10 +113,11 @@ here is to help track all the important changes that happened in ARMI, so ARMI c
has changed during the next `release `_. To that end,
minor PRs won't require a release note.
-In particular, in the release notes, you will find four sections in the releasee notes:
+In particular, you will find four sections in the release notes:
1. **New Features** - A new feature (or major addition to a current feature) was added to the code.
-2. **API Changes** - ANY change to the public-facing API of ARMI.
+2. **API Changes** - ANY breaking change to the public-facing API of ARMI. (A breaking change is
+ when you change the existing API, not when you add something new to the API.)
3. **Bug Fixes** - ANY bug fix in the code (not the documentation), no matter how minor.
4. **Changes that Affect Requirements** - If you touch the code (``impl``) or test (``test``) for
anything that currently has a requirement crumb. (This must be a non-trivial change.)
@@ -147,7 +148,7 @@ like unit testing or building documentation.
Third-Party Licensing
---------------------
Be careful when including any dependency in ARMI (say in the ``pyproject.toml`` file) not
-to include anything with a license that superceeds our Apache license. For instance,
+to include anything with a license that supersedes our Apache license. For instance,
any third-party Python library included in ARMI with a GPL license will make the whole
project fall under the GPL license. But a lot of potential users of ARMI will want to
keep some of their work private, so we can't allow any GPL tools.
@@ -164,7 +165,7 @@ might look like ``0.1.7``, ``1.0.0``, or ``12.3.123``. Each number has a specifi
* ``minor`` - Revved when we decide the code or our API has reached a stable point.
* ``bump`` - Revved every time we modify the API, and any other time we want.
-**NOTE**: Changes to documenation or testing probably do not deserve a version bump.
+**NOTE**: Changes to documentation or testing probably do not deserve a version bump.
**Any change to a major or minor version is considered a release.**
@@ -180,28 +181,108 @@ Every release should follow this process:
3. Add release notes to the documentation:
`here `__.
4. Tag the commit after it goes into the repo:
- - From this commit: ``git tag -a 1.0.0 -m "Release v1.0.0"``
- - Or from another commit: ``git tag 1.0.0 -m "Release v1.0.0"``
- - Pushing to the repo: ``git push origin 1.0.0``
- - **NOTE** - The ONLY tags in the ARMI repo are for official version releases.
+
+ - From this commit: ``git tag -a 1.0.0 -m "Release v1.0.0"``
+ - Or from another commit: ``git tag 1.0.0 -m "Release v1.0.0"``
+ - Pushing to the repo: ``git push origin 1.0.0``
+ - **NOTE** - The ONLY tags in the ARMI repo are for official version releases.
5. Also add the release notes on `the GitHub UI `__.
6. Follow the instructions `here `_ to
archive the new documentation.
7. Tell everyone!
-Module-Level Logging
-====================
-In most of the modules in ``armi``, you will see logging using the ``runLog`` module.
-This is a custom, global logging object provided by the import:
+Logging with runLog
+===================
+ARMI provides a logging tool, ``runLog``, to be used in place of ``print`` for all logging during a
+simulation. It is very easy to use:
.. code-block:: python
from armi import runLog
-If you want a logger specific to a single module, say to provide debug logging for only
-one module, that functionality is provided by what might look like a bare Python logging
-import, but is actually calling the same underlying ``armi`` logging tooling:
+ runLog.debug("This will only be seen if you run in debug mode.")
+ runLog.info("Default log level.")
+ runLog.error("The run will die, or the results are invalid.")
+
+.. note::
+    Calling ``runLog.error()`` is not the same as raising an exception; a log statement does not
+    kill a run or raise an error, it just puts some text in the log.
+
+When an ARMI simulation is run, it will be run at a particular log level. All log messages that are
+at or above that log level will be seen during the simulation and in the final log files. To control
+the log level of an ARMI run, you use the setting ``verbosity`` in your settings file. You will
+probably be running ARMI in a parallel mode, and if you want the child processes to have a different
+log level than the main process, you can set ``branchVerbosity`` to the desired verbosity of all the
+child processes.
+
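+For example, a minimal sketch of these two settings in a settings file (the values here are
+illustrative)::
+
+    verbosity: debug
+    branchVerbosity: important
+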
+For reference, here are the log levels that ARMI supports:
+
+.. list-table::
+ :widths: 20 20 60
+ :header-rows: 1
+
+ * - Level
+ - Value
+ - When to Use
+ * - debug
+ - 10
+ - This will only be seen if the simulation is run in debug mode.
+ * - extra
+ - 15
+ - More detailed than will normally be seen in a usual simulation.
+ * - info
+ - 20
+ - Use only for things that are important enough to be visible during every normal simulation.
+ * - important
+ - 25
+ - More important than the default log level, but not a problem or issue.
+ * - prompt
+ - 27
+ - RESERVED for the ARMI CLI.
+ * - warning
+ - 30
+ - Use ONLY for issues that may or may not invalidate the simulation results.
+ * - error
+ - 40
+ - Use ONLY for problems that halt the program or invalidate the simulation results.
+ * - header
+ - 100
+ - Use ONLY to define major sections in the log files.
+
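+A quick sketch using a few of these levels (assuming level-named helpers like the ones shown
+above)::
+
+    runLog.extra("Finer detail than the default level.")
+    runLog.important("A milestone, not a problem.")
+    runLog.header("=========== Cycle 1 ===========")
+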
+
+Blocking Duplicate Logs
+-----------------------
+Sometimes you want to add a log message, but based on program logic it might pop up in the final log
+file multiple times, even thousands of times. And probably you do not want that. Happily, the
+``runLog`` tool provides a simple argument that will stop a single log line from being logged more
+than once.
+
+Here is a (silly) example of a heavily duplicated log message:
+
+.. code-block:: python
+
+ for _i in range(1000):
+ runLog.warning("Something wicked this way comes.")
+
+That log message gets printed 1,000 times, but we can ensure it is only printed once:
+
+.. code-block:: python
+
+ for _i in range(1000):
+ runLog.warning("Something wicked this way comes.", single=True)
+
+Obviously, this will not be useful in every scenario. But it is a handy tool to clean up your log
+files.
+
+
+Module-Level Logging
+--------------------
+The ``runLog`` tool also allows you to log one module differently from the rest of the code
+base. For instance, you could set the log level to "debug" in just one Python file, to help testing
+during development.
+
+That functionality is provided by what might look like a bare Python logging import, but is
+actually calling the same underlying ``armi`` logging tooling:
.. code-block:: python
@@ -212,10 +293,10 @@ In either case, you can then log using the same, easy interface:
.. code-block:: python
- runLog.info('information here')
- runLog.error('extra error info here')
+ runLog.info('Normal stuff.')
+ runLog.error('Oh no!')
-Finally, you can change the logging level in either above scenario by doing:
+Finally, you can change the logging level in the above scenario by doing:
.. code-block:: python
diff --git a/doc/gallery-src/analysis/run_hexReactorToRZ.py b/doc/gallery-src/analysis/run_hexReactorToRZ.py
index 1baa4c7e2..89b3e6279 100644
--- a/doc/gallery-src/analysis/run_hexReactorToRZ.py
+++ b/doc/gallery-src/analysis/run_hexReactorToRZ.py
@@ -55,3 +55,4 @@
figs = converter.plotConvertedReactor()
plt.show()
+plt.close()
diff --git a/doc/gallery-src/framework/run_blockVolumeFractions.py b/doc/gallery-src/framework/run_blockVolumeFractions.py
index 993cb6ba0..340befc9c 100644
--- a/doc/gallery-src/framework/run_blockVolumeFractions.py
+++ b/doc/gallery-src/framework/run_blockVolumeFractions.py
@@ -17,18 +17,16 @@
Computing Component Volume Fractions on a Block with Automatic Thermal Expansion
================================================================================
-Given an :py:mod:`Block `, compute
-the component volume fractions. Assess the change in volume
-of these components within the block as the temperatures of
-the fuel and structure components are uniformly increased.
+Given an :py:mod:`Block `, compute the component volume fractions. Assess
+the change in volume of these components within the block as the temperatures of the fuel and
+structure components are uniformly increased.
-Note: Thermal expansion is automatically considered with
-material data defined within :py:mod:`materials `.
+Note: Thermal expansion is automatically considered with material data defined within
+:py:mod:`materials `.
"""
# ruff: noqa: E402
import collections
-import tabulate
import matplotlib.pyplot as plt
from armi import configure
@@ -37,13 +35,14 @@
from armi.reactor.flags import Flags
from armi.reactor.tests.test_blocks import buildSimpleFuelBlock
+from armi.utils import tabulate
def writeInitialVolumeFractions(b):
"""Write out the initial temperatures and component volume fractions."""
headers = ["Component", "Temperature, °C", "Volume Fraction"]
data = [(c, c.temperatureInC, volFrac) for c, volFrac in b.getVolumeFractions()]
- print(tabulate.tabulate(tabular_data=data, headers=headers) + "\n")
+ print(tabulate.tabulate(data=data, headers=headers) + "\n")
def plotVolFracsWithComponentTemps(b, uniformTemps):
diff --git a/doc/gallery-src/framework/run_computeReactionRates.py b/doc/gallery-src/framework/run_computeReactionRates.py
index 8639cdb71..1c73f7e37 100644
--- a/doc/gallery-src/framework/run_computeReactionRates.py
+++ b/doc/gallery-src/framework/run_computeReactionRates.py
@@ -80,7 +80,7 @@ def createDummyReactor():
# Create a single fuel assembly
a = assemblies.HexAssembly("fuel assembly")
- a.spatialGrid = grids.axialUnitGrid(1)
+ a.spatialGrid = grids.AxialGrid.fromNCells(1)
a.spatialLocator = r.core.spatialGrid[1, 0, 0]
# Create a single fuel block
diff --git a/doc/gallery-src/framework/run_fuelManagement.py b/doc/gallery-src/framework/run_fuelManagement.py
index 39326872a..44a18df1c 100644
--- a/doc/gallery-src/framework/run_fuelManagement.py
+++ b/doc/gallery-src/framework/run_fuelManagement.py
@@ -13,7 +13,6 @@
# limitations under the License.
"""
Fuel management in a LWR.
-=========================
Demo of locating and swapping assemblies in a core with Cartesian geometry. Given a burnup
distribution, this swaps high burnup assemblies with low ones.
@@ -31,6 +30,8 @@
# sphinx_gallery_thumbnail_number = 2
import math
+import matplotlib.pyplot as plt
+
from armi import configure
from armi.physics.fuelCycle import fuelHandlers
from armi.reactor.flags import Flags
@@ -72,3 +73,4 @@
# show final burnup distribution
plotting.plotFaceMap(reactor.core, param="percentBu")
+plt.close()
diff --git a/doc/index.rst b/doc/index.rst
index b8f039dc4..fb39ea918 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -9,10 +9,10 @@ ARMI
:maxdepth: 2
installation
- gallery/index
- tutorials/index
user/index
developer/index
+ gallery/index
+ tutorials/index
release/index
glossary
API Docs <.apidocs/modules>
@@ -26,4 +26,3 @@ ARMI
* :ref:`modindex`
* :ref:`search`
-
diff --git a/doc/release/0.3.rst b/doc/release/0.3.rst
index f241dff6f..0bdb4ac1a 100644
--- a/doc/release/0.3.rst
+++ b/doc/release/0.3.rst
@@ -2,39 +2,10 @@
ARMI v0.3 Release Notes
***********************
-ARMI v0.3.1
-===========
-Release Date: TBD
-
-New Features
-------------
-#. TBD
-
-API Changes
------------
-#. Renaming ``structuredgrid.py`` to camelCase. (`PR#1650 `_)
-#. Removing unused argument from ``Block.coords()``. (`PR#1651 `_)
-#. Removing unused method ``HexGrid.allPositionsInThird()``. (`PR#1655 `_)
-#. Removed unused methods: ``Reactor.getAllNuclidesIn()``, ``plotTriangleFlux()``. (`PR#1656 `_)
-#. Removed ``armi.utils.dochelpers``; not relevant to nuclear modeling. (`PR#1662 `_)
-#. Removing old tools created to help people convert to the current database format: ``armi.bookkeeping.db.convertDatabase()`` and ``ConvertDB``. (`PR#1658 `_)
-#. TBD
-
-Bug Fixes
----------
-#. Fixed four bugs with "corners up" hex grids. (`PR#1649 `_)
-#. TBD
-
-Changes that Affect Requirements
---------------------------------
-#. Very minor change to ``Block.coords()``, removing unused argument. (`PR#1651 `_)
-#. Touched ``HexGrid`` by adding a "cornersUp" property and fixing two bugs. (`PR#1649 `_)
-#. TBD
-
ARMI v0.3.0
===========
-Release Date: 2024-01-26
+Release Date: 2024-02-02
What's new in ARMI?
-------------------
diff --git a/doc/release/0.4.rst b/doc/release/0.4.rst
new file mode 100644
index 000000000..973d91cf0
--- /dev/null
+++ b/doc/release/0.4.rst
@@ -0,0 +1,132 @@
+***********************
+ARMI v0.4 Release Notes
+***********************
+
+ARMI v0.4.1
+===========
+Release Date: TBD
+
+New Features
+------------
+#. ARMI now supports Python 3.12. (`PR#1813 `_)
+#. Removing the ``tabulate`` dependency by ingesting it into ``armi.utils.tabulate``. (`PR#1811 `_)
+#. Adding ``--skip-inspection`` flag to ``CompareCases`` CLI. (`PR#1842 `_)
+#. Allow merging a component with zero area into another component. (`PR#1858 `_)
+#. Use ``Block.getNumPins()`` in ``HexBlock._rotatePins()``. (`PR#1859 `_)
+#. Provide utilities for determining location of a rotated object in a hexagonal lattice (``getIndexOfRotatedCell``). (`PR#1846 `_)
+#. Provide ``Parameter.hasCategory`` for quickly checking if a parameter is defined with a given category. (`PR#1899 `_)
+#. Provide ``ParameterCollection.where`` for efficient iteration over parameters whose definition matches a given condition. (`PR#1899 `_)
+#. Plugins can provide the ``getAxialExpansionChanger`` hook to customize axial expansion. (`PR#1870 `_)
+#. Alphabetizing ``Flags.toString()`` results. (`PR#1912 `_)
+#. Moving ``settingsValidation`` from ``operators`` to ``settings``. (`PR#1895 `_)
+#. Removing deprecated method ``prepSearch``. (`PR#1845 `_)
+#. Removing unused function ``SkippingXsGen_BuChangedLessThanTolerance``. (`PR#1845 `_)
+#. Allow for unknown Flags when opening a DB. (`PR#1844 `_)
+#. ``Assembly.rotatePins`` and ``Block.rotatePins`` have been removed. Prefer to use ``Assembly.rotate`` and ``Block.rotate``. (`PR#1846 `_)
+#. TBD
+
+Bug Fixes
+---------
+#. Fixed ``DerivedShape.getArea`` for ``cold=True``. (`PR#1831 `_)
+#. Fixed error parsing command line integers in ``ReportsEntryPoint``. (`PR#1824 `_)
+#. Fixed ``PermissionError`` when using ``syncDbAfterWrite``. (`PR#1857 `_)
+#. Fixed ``MpiDirectoryChanger``. (`PR#1853 `_)
+#. Changed data type of ``thKernel`` setting from ``bool`` to ``str`` in ``ThermalHydraulicsPlugin``. (`PR#1855 `_)
+#. Rotate hexagonal assembly patches correctly on facemap plots. (`PR#1883 `_)
+#. Update height of fluid components after axial expansion. (`PR#1828 `_)
+#. Material theoretical density is serialized to and read from database. (`PR#1852 `_)
+#. Removed broken and unused column in ``summarizeMaterialData``. (`PR#1925 `_)
+#. TBD
+
+Quality Work
+------------
+#. Removing deprecated code ``axialUnitGrid``. (`PR#1809 `_)
+#. Refactoring ``axialExpansionChanger``. (`PR#1861 `_)
+#. TBD
+
+Changes that Affect Requirements
+--------------------------------
+#. TBD
+
+
+ARMI v0.4.0
+===========
+Release Date: 2024-07-29
+
+New Features
+------------
+#. Conserve mass by component in ``assembly.setBlockMesh()``. (`PR#1665 `_)
+#. Removal of the ``Block.reactor`` property. (`PR#1425 `_)
+#. System information is now also logged on Linux. (`PR#1689 `_)
+#. Reset ``Reactor`` data on worker processors after every interaction to free memory from state distribution.
+ (`PR#1729 `_ and `PR#1750 `_)
+#. Density can be specified for components via ``custom isotopics`` in the blueprints. (`PR#1745 `_)
+#. Implement a new ``JaggedArray`` class that handles HDF5 interface for jagged data. (`PR#1726 `_)
+
+API Changes
+-----------
+#. Replacing the concrete material with a better reference. (`PR#1717 `_)
+#. Adding more detailed time information to logging. (`PR#1796 `_)
+#. Renaming ``structuredgrid.py`` to camelCase. (`PR#1650 `_)
+#. Removing unused argument from ``Block.coords()``. (`PR#1651 `_)
+#. Removing unused method ``HexGrid.allPositionsInThird()``. (`PR#1655 `_)
+#. Removed unused methods: ``Reactor.getAllNuclidesIn()``, ``plotTriangleFlux()``. (`PR#1656 `_)
+#. Removed ``armi.utils.dochelpers``; not relevant to nuclear modeling. (`PR#1662 `_)
+#. Removing old tools created to help people convert to the current database format: ``armi.bookkeeping.db.convertDatabase()`` and ``ConvertDB``. (`PR#1658 `_)
+#. Removing the unused method ``Case.buildCommand()``. (`PR#1773 `_)
+#. Removed the variable ``armi.physics.neutronics.isotopicDepletion.ORDER``. (`PR#1671 `_)
+#. Removing extraneous ``ArmiObject`` methods. (`PR#1667 `_)
+ * Moving ``ArmiObject.getBoronMassEnrich()`` to ``Block``.
+ * Moving ``ArmiObject.getPuMoles()`` to ``Block``.
+ * Moving ``ArmiObject.getUraniumMassEnrich()`` to ``Block``.
+   * Removing ``ArmiObject.getMaxUraniumMassEnrich()``.
+ * Removing ``ArmiObject.getMaxVolume()`` & ``Block.getMaxVolume()``.
+ * Removing ``ArmiObject.getPuFrac()``.
+ * Removing ``ArmiObject.getPuMass()``.
+ * Removing ``ArmiObject.getPuN()``.
+ * Removing ``ArmiObject.getZrFrac()``.
+ * Removing ``ArmiObject.printDensities()``.
+ * Moving ``Composite.isOnWhichSymmetryLine()`` to ``Assembly``.
+ * Removing ``Block.isOnWhichSymmetryLine()``.
+#. Removing the ``Block.reactor`` property. (`PR#1425 `_)
+#. Moving several ``ArmiObject`` methods. (`PR#1425 `_)
+ * Moving ``ArmiObject.getNeutronEnergyDepositionConstants`` to ``Block``.
+ * Moving ``ArmiObject.getGammaEnergyDepositionConstants`` to ``Block``.
+ * Moving ``ArmiObject.getTotalEnergyGenerationConstants`` to ``Block``.
+ * Moving ``ArmiObject.getFissionEnergyGenerationConstants`` to ``Block``.
+ * Moving ``ArmiObject.getCaptureEnergyGenerationConstants`` to ``Block``.
+#. Removing the parameter ``rdIterNum``. (`PR#1704 `_)
+#. Removing the parameters ``outsideFuelRing`` and ``outsideFuelRingFluxFr``. (`PR#1700 `_)
+#. Removing the setting ``doOrificedTH``. (`PR#1706 `_)
+#. Changing the Doppler constant params to ``VOLUME_INTEGRATED``. (`PR#1659 `_)
+#. Change ``Operator._expandCycleAndTimeNodeArgs`` to be a non-static method. (`PR#1766 `_)
+#. Database now writes state at the last time node of a cycle rather than during the ``DatabaseInterface.interactEOC`` interaction. (`PR#1090 `_)
+
+Bug Fixes
+---------
+#. Fixed four bugs with "corners up" hex grids. (`PR#1649 `_)
+#. Fixed ``safeCopy`` to work on both Windows and Linux with strict permissions (`PR#1691 `_)
+#. When creating a new XS group, inherit settings from initial group. (`PR#1653 `_, `PR#1751 `_)
+#. Fixed a bug with ``Core.getReactionRates``. (`PR#1771 `_)
+#. Fixed a bug with interactive versus batch mode checking on Windows versus Linux. (`PR#1786 `_)
+
+Quality Work
+------------
+#. Creating a single-block test reactor, to speed up unit tests. (`PR#1737 `_)
+#. Supporting MacOS in CI. (`PR#1713 `_)
+#. We now enforce a maximum line length of 120 characters, using ``ruff``. (`PR#1646 `_)
+#. Updating ``ruff`` to version ``0.5.1``. (`PR#1770 `_)
+#. Move ``.coveragerc`` file information into ``pyproject.toml``. (`PR#1692 `_)
+
+Changes that Affect Requirements
+--------------------------------
+#. Very minor change to ``Block.coords()``, removing unused argument. (`PR#1651 `_)
+#. Touched ``HexGrid`` by adding a "cornersUp" property and fixing two bugs. (`PR#1649 `_)
+#. Very slightly modified the implementation of ``Assembly.add()``. (`PR#1670 `_)
diff --git a/doc/tutorials/armi-example-app b/doc/tutorials/armi-example-app
index 60becb513..9279d8b9f 160000
--- a/doc/tutorials/armi-example-app
+++ b/doc/tutorials/armi-example-app
@@ -1 +1 @@
-Subproject commit 60becb5137cf3c3671ebd44b06b894287611c181
+Subproject commit 9279d8b9f69ccfd66be5501944d1cf2f004daf6f
diff --git a/doc/tutorials/walkthrough_lwr_inputs.rst b/doc/tutorials/walkthrough_lwr_inputs.rst
index e4fa3def6..3afde1791 100644
--- a/doc/tutorials/walkthrough_lwr_inputs.rst
+++ b/doc/tutorials/walkthrough_lwr_inputs.rst
@@ -6,9 +6,9 @@ In the :doc:`previous tutorial `,
we introduced the basic input files and made a full
input for a sodium-cooled fast reactor. In this tutorial, we will build simple
inputs for the light-water reactor (LWR) benchmark problem called C5G7 as defined
-in `NEA/NSC/DOC(2003)16 `_.
+in `NEA/NSC/DOC(2003)16 `_.
The compositions are documented in
-`NEA/NSC/DOC(98)2 `_.
+`NEA/NSC/DOC(96)2 `_.
.. tip:: The full inputs created in this tutorial are available for download at the bottom of
this page.
@@ -37,7 +37,8 @@ Custom isotopic vectors
-----------------------
When using materials that differ in properties or composition from the
materials in the ARMI material library, you can use custom isotopics
-to specify their composition.
+to specify their composition. The composition details below are documented in Table 2 of
+`NEA/NSC/DOC(96)2 `_.
.. literalinclude:: ../../armi/tests/tutorials/c5g7-blueprints.yaml
diff --git a/doc/user/assembly_parameters_report.rst b/doc/user/assembly_parameters_report.rst
index 964815fbe..e06ec2790 100644
--- a/doc/user/assembly_parameters_report.rst
+++ b/doc/user/assembly_parameters_report.rst
@@ -4,7 +4,8 @@
Assembly Parameters
*******************
-This document lists all of the Assembly Parameters that are provided by the ARMI Framework.
+This document lists all of the :py:mod:`Assembly Parameters ` that are provided by the
+ARMI Framework. See :py:mod:`armi.reactor.parameters` for use.
.. exec::
from armi.reactor import assemblies
diff --git a/doc/user/block_parameters_report.rst b/doc/user/block_parameters_report.rst
index 1c54b1aa9..96358612d 100644
--- a/doc/user/block_parameters_report.rst
+++ b/doc/user/block_parameters_report.rst
@@ -4,7 +4,8 @@
Block Parameters
****************
-This document lists all of the Block Parameters that are provided by the ARMI Framework.
+This document lists all of the :py:mod:`Block Parameters ` that are provided by the ARMI
+Framework. See :py:mod:`armi.reactor.parameters` for use.
.. exec::
from armi.reactor import blocks
diff --git a/doc/user/component_parameters_report.rst b/doc/user/component_parameters_report.rst
index 75d356680..3e28e765e 100644
--- a/doc/user/component_parameters_report.rst
+++ b/doc/user/component_parameters_report.rst
@@ -4,7 +4,8 @@
Component Parameters
********************
-This document lists all of the Component Parameters that are provided by the ARMI Framework.
+This document lists all of the :py:mod:`Component Parameters ` that are
+provided by the ARMI Framework. See :py:mod:`armi.reactor.parameters` for use.
.. exec::
from armi.reactor.components import Component
diff --git a/doc/user/core_parameters_report.rst b/doc/user/core_parameters_report.rst
index dc0ad5c4b..ee5c60d35 100644
--- a/doc/user/core_parameters_report.rst
+++ b/doc/user/core_parameters_report.rst
@@ -4,7 +4,8 @@
Core Parameters
***************
-This document lists all of the Core Parameters that are provided by the ARMI Framework.
+This document lists all of the Core Parameters that are provided by the ARMI Framework. See
+:py:mod:`armi.reactor.parameters` for use.
.. exec::
from armi.reactor import reactors
diff --git a/doc/user/inputs.rst b/doc/user/inputs.rst
index 01212e1b3..d3d460fc9 100644
--- a/doc/user/inputs.rst
+++ b/doc/user/inputs.rst
@@ -2,37 +2,51 @@
Inputs
******
-ARMI input files define the initial state of the reactor model and tell ARMI what kind of analysis should be
-performed on it.
+ARMI input files define the initial state of the reactor model and tell ARMI what kind of analysis
+should be performed on it.
-.. note:: We have a :ref:`walkthrough-inputs` tutorial for a quick
- overview of the inputs.
+.. note:: We have a :ref:`walkthrough-inputs` tutorial for a quick overview of the inputs.
There are several input files:
Settings file
- Contains simulation parameters (like full power, cycle length, and which physics modules to
- activate) and all kind of modeling approximation settings (e.g. convergence criteria)
+ Contains simulation parameters (like full power, cycle length, and which physics modules to
+ activate) and all kind of modeling approximation settings (e.g. convergence criteria)
Blueprints file
- Contains dimensions and composition of the components/blocks/assemblies in your reactor systems, from fuel
- pins to heat exchangers
-
+ Contains dimensions and composition of the components/blocks/assemblies in your reactor systems,
+ from fuel pins to heat exchangers
+
Fuel management file
- Describes how fuel moves around during a simulation
+ Describes how fuel moves around during a simulation
-Depending on the type of analysis, there may be additional inputs required. These include things like
-control logic, ex-core models for transients and shielding, etc.
+Depending on the type of analysis, developers may create other input files for things like control
+logic, ex-core models for transients and shielding, etc.
-The core map input files can be graphically manipulated with the
-:py:mod:`Grid editor `.
+
+YAML Files
+==========
+ARMI's input files all use the `YAML `_ format. This is a well-known
+file format, chosen because it is human-readable and easy to hand-write. That being said, there
+are two details about the YAML format that are important to know:
+
+Ordering
+  YAML is not order specific; however, one of the techniques used to limit the size of the input
+  includes using YAML anchors to reuse block and component definitions. YAML anchors (e.g.
+  ``&block_name``) must be defined before their corresponding alias (e.g. ``*block_name``) is
+  used, as in the sketch below.
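+
+  A minimal sketch of that ordering (the names here are illustrative, not a complete blueprint)::
+
+      blocks:
+          fuel: &block_fuel    # anchor defined first
+              # ... block definition ...
+      assemblies:
+          igniter fuel:
+              blocks: [*block_fuel, *block_fuel]    # aliases used afterward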
+
+Duplicate Keys
+  YAML inputs may contain duplicate keys, and in ARMI a duplicate is often an input mistake.
+  Unfortunately, the common YAML-parsing libraries do not treat a duplicate as an error; they
+  silently keep only one of the values. You will have to hand-verify your inputs are correct, as
+  in the sketch below.
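+
+  For example (a sketch; the values are illustrative)::
+
+      power: 100.0e6
+      power: 350.0e6    # no error raised; one value silently wins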
The Settings Input File
=======================
-The **settings** input file defines a series of key/value pairs the define various information about the system you are
-modeling as well as which modules to run and various modeling/approximation settings. For example, it includes:
+The **settings** input file defines a series of key/value pairs that define various information about
+the system you are modeling as well as which modules to run and various modeling/approximation
+settings. For example, it includes:
* The case title
* The reactor power
@@ -48,11 +62,12 @@ This file is a YAML file that you can edit manually with a text editor or with t
Here is an excerpt from a settings file:
-.. literalinclude:: ../../../armi/tests/armiRun.yaml
+.. literalinclude:: ../../armi/tests/armiRun.yaml
:language: yaml
:lines: 3-15
-A full listing of settings available in the framework may be found in the `Table of all global settings <#settings-report>`_ .
+A full listing of settings available in the framework may be found in the
+`Table of all global settings <#settings-report>`_ .
Many settings are provided by the ARMI Framework, and others are defined by various plugins.
@@ -60,13 +75,14 @@ Many settings are provided by the ARMI Framework, and others are defined by vari
The ARMI GUI
------------
-The ARMI GUI may be used to manipulate many common settings (though the GUI can't change all of the settings). The GUI
-also enables the graphical manipulation of a reactor core map, and convenient automation of commands required to submit to a
-cluster. The GUI is a front-end to
-these files. You can choose to use the GUI or not, ARMI doesn't know or care --- it just reads these files and runs them.
+The ARMI GUI may be used to manipulate many common settings (though the GUI can't change all of the
+settings). The GUI also enables the graphical manipulation of a reactor core map, and convenient
+automation of commands required to submit to a cluster. The GUI is a front-end to these files. You
+can choose to use the GUI or not, ARMI doesn't know or care --- it just reads these files and runs
+them.
-Note that one settings input file is required for each ARMI case, though many ARMI cases can refer to the same
-Blueprints, Core Map, and Fuel Management inputs.
+Note that one settings input file is required for each ARMI case, though many ARMI cases can refer
+to the same Blueprints, Core Map, and Fuel Management inputs.
.. tip:: The ARMI GUI is not yet included in the open-source ARMI framework
@@ -172,7 +188,7 @@ In the case that only a single state is to be examined (i.e. no burnup), the use
In the case of burnup, the reactor cycle history may be specified using either the simple or detailed
option.
The simple cycle history consists of the following case settings:
-
+
* ``power``
* ``nCycles`` (default = 1)
* ``burnSteps`` (default = 4)
@@ -200,7 +216,7 @@ An example simple cycle history might look like
Note the use of the special shorthand list notation, where repeated values in a list can be specified using an "R" followed by the number of times the value is to be repeated.
The above scheme would represent 3 cycles of operation:
-
+
1. 100% power for 90 days, split into two segments of 45 days each, followed by 10 days shutdown (i.e. 90% capacity)
2. 50% power for 30 days, split into two segments of 15 days each, followed by 70 days shutdown (i.e. 15% capacity)
@@ -211,8 +227,8 @@ In each cycle, criticality calculations will be performed at 3 nodes evenly-spac
This input format can be useful for quick scoping and certain types of real analyses, but clearly has its limitations.
To overcome these limitations, the detailed cycle history, consisting of the ``cycles`` setting may be specified instead.
-For each cycle, an entry to the ``cycles`` list is made with the following optional fields:
-
+For each cycle, an entry to the ``cycles`` list is made with the following optional fields:
+
* ``name``
* ``power fractions``
* ``cumulative days``, ``step days``, or ``burn steps`` + ``cycle length``
@@ -267,7 +283,7 @@ Restart cases
^^^^^^^^^^^^^
Oftentimes the user is interested in re-examining just a specific set of time nodes from an existing run.
In these cases, it is sometimes not necessary to rerun an entire reactor history, and one may instead use one of the following options:
-
+
1. Snapshot, where the reactor state is loaded from a database and just a single time node is run.
2. Restart, where the cycle history is loaded from a database and the calculation continues through the remaining specified time history.
@@ -286,7 +302,7 @@ To run a snapshot, the following settings must be added to your case settings:
An example of a snapshot run input:
.. code-block:: yaml
-
+
runType: Snapshots
reloadDBName: my-old-results.h5
dumpSnapshot: ['000000', '001002'] # would produce 2 snapshots, at BOL and at node 2 of cycle 1
@@ -300,10 +316,10 @@ To run a restart, the following settings must be added to your case settings:
* If you would like to change the specified reactor history (see :ref:`restart-cases`), keep the history up to the restarting cycle/node unchanged, and just alter the history after that point. This means that the cycle history specified in your restart run should include all cycles/nodes up to the end of the simulation. For complicated restarts, it may be necessary to use the detailed ``cycles`` setting, even if the original case only used the simple history option.
A few examples of restart cases:
-
+
- Restarting a calculation at a specific cycle/node and continuing for the remainder of the originally-specified cycle history:
.. code-block:: yaml
-
+
# old settings
nCycles: 2
burnSteps: 2
@@ -313,7 +329,7 @@ A few examples of restart cases:
loadingFile: my-blueprints.yaml
.. code-block:: yaml
-
+
# restart settings
nCycles: 2
burnSteps: 2
@@ -326,7 +342,7 @@ A few examples of restart cases:
- Add an additional cycle to the end of a case:
.. code-block:: yaml
-
+
# old settings
nCycles: 1
burnSteps: 2
@@ -336,7 +352,7 @@ A few examples of restart cases:
loadingFile: my-blueprints.yaml
.. code-block:: yaml
-
+
# restart settings
nCycles: 2
burnSteps: 2
@@ -349,7 +365,7 @@ A few examples of restart cases:
- Restart but cut the reactor history short:
.. code-block:: yaml
-
+
# old settings
nCycles: 3
burnSteps: 2
@@ -359,7 +375,7 @@ A few examples of restart cases:
loadingFile: my-blueprints.yaml
.. code-block:: yaml
-
+
# restart settings
nCycles: 2
burnSteps: 2
@@ -372,7 +388,7 @@ A few examples of restart cases:
- Restart with a different number of steps in the third cycle using the detailed ``cycles`` setting:
.. code-block:: yaml
-
+
# old settings
nCycles: 3
burnSteps: 2
@@ -382,7 +398,7 @@ A few examples of restart cases:
loadingFile: my-blueprints.yaml
.. code-block:: yaml
-
+
# restart settings
nCycles: 3
cycles:
@@ -462,7 +478,7 @@ The ARMI data model is represented schematically below, and the blueprints are d
Defines :py:class:`~armi.reactor.components.component.Component` inputs for a
:py:class:`~armi.reactor.blocks.Block`.
-:ref:`asssemblies `:
+:ref:`assemblies `:
Defines vertical stacks of blocks used to define the axial profile of an
:py:class:`~armi.reactor.assemblies.Assembly`.
@@ -481,11 +497,8 @@ The ARMI data model is represented schematically below, and the blueprints are d
:ref:`custom isotopics `:
Special setting: defines user-specified isotopic compositions.
-.. warning::
-
- YAML is not order specific; however, one of the techniques used to limit the size of the input
- includes using YAML anchors to resuse block and component definitions. YAML anchors (e.g.
- ``&block_name``) must be defined before their corresponding alias (e.g. ``*block_name``) used.
+The core map input files can be graphically manipulated with the
+:py:mod:`Grid editor `.
.. _blocks-and-components:
@@ -542,7 +555,7 @@ Component name
The component name (``fuel``) is specified at the top. Some physics kernels interpret names specially, so
pay attention to any naming conventions. As a general rule, you can expect that people will be doing regex
on your name, so you should not use any of these characters in your component names:
- ``. ^ $ * + ? { } [ ] \ | ( ) :``.
+ ``. ^ $ * + ? { } [ ] \ | ( ) :``.
shape
The shape will be extruded to the length specified in the ``assemblies`` input section below. ARMI contains
@@ -564,8 +577,6 @@ material
The temperature (in C) that the component dimensions will be thermal expanded to (using material properties based on
the ``material`` input). To disable automatic thermal expansion, set |Tinput| and |Thot| both to the same value
- .. note:: The T/H modules of ARMI will update the hot temperature when coupling is activated.
-
mult
Multiplicity specifies how many duplicates of this component exist in this block. If you want 169 pins per assembly,
this would be 169. This does not explicitly describe the location of the pins. Note that many fast-neutron systems
@@ -582,7 +593,7 @@ od
Component Types
^^^^^^^^^^^^^^^
Each component has a variety of dimensions to define the shape and composition. All dimensions are
-in cm. The following is a list of included component shapes and their dimension inputs. Again,
+in cm. The following is a list of included component shapes and their dimension inputs. Again,
additional/custom components with arbitrary dimensions may be provided by the user via plugins.
.. exec::
@@ -729,7 +740,7 @@ Once components and blocks are defined, Assemblies can be created as extruded st
bottom to top. The assemblies use YAML anchors to refer to the blocks defined in the previous section.
.. note:: We aren't happy with the use of anchors to refer to blocks, and plan to change it (back) to just using the
- block names directly. However, the use of anchors for input to be applied to multiple assemblies (e.g. heights) is
+ block names directly. However, the use of anchors for input to be applied to multiple assemblies (e.g. heights) is
quite nice.
A complete definition of an inner-core assembly may be seen below::
@@ -749,15 +760,14 @@ A complete definition of an inner-core assembly may be seen below::
nozzleType: Inner
xs types: [A, B, C, D, E, F]
-.. note:: While component dimensions are entered as cold dimensions, axial heights must
- be entered as hot dimensions. The reason for this is that each component with different
- material will thermally expand at different rates. In the axial dimension, this is
- problematic because after a change in temperature each component in the same block
- will have a different height. The solution is to pre-expand each component
- axially and enter hot axial block heights. After the reactor is created, further
- temperature changes will cause dimension changes only in 2 dimensions (radially). Mass
- is always conserved, but if temperature deviates significantly from hot axial heights,
- density may deviate as well.
+.. note::
+ While component dimensions are entered as cold dimensions, axial heights may be entered as
+ either cold or hot dimensions. In older versions of ARMI, it was required to enter heights
+ in the hot dimension (this behavior is preserved by setting ``inputHeightsConsideredHot: True``).
+ However, with the
+ :py:class:`axial expansion changer `,
+ heights may be entered at cold temperatures (``inputHeightsConsideredHot: False``). Each
+ Assembly will then be expanded to its hot dimensions upon construction, as sketched below.
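+
+ For example, a sketch of the corresponding settings-file entry (the value is illustrative)::
+
+     inputHeightsConsideredHot: false
+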
For many cases, a shared height and axial mesh point definition is sufficient. These can be included
globally as shown above and linked with anchors, or specified explicitly.
@@ -769,24 +779,25 @@ specifier
hex dragger.
xs types
- The **cross-section type** is a single capital letter that identifies which cross section (XS) set
- will be applied to this block. Each cross section set must be defined for at least one block with
- fissile fuel. When the lattice physics code executes in ARMI, it determines the representative
- blocks from each cross section type and burnup group and runs it to create the cross section set
- for all blocks of the same type and in the same burnup group. Generally, it is best to set blocks
- that have much different compositions to have separate cross section types. The tradeoff is that
- the more XS types you define, the more CPU time the case will take to run.
+ The **cross-section type** is usually a single capital letter that identifies which cross section
+ (XS) set will be applied to the block. Each cross section set must be defined for at least one
+ block with fissile fuel. When the lattice physics code executes in ARMI, it determines the
+ representative blocks from each cross section type and burnup group and runs it to create the
+ cross section set for all blocks of the same type and in the same burnup group. Generally, it is
+ best to set blocks that have very different compositions to have separate cross section types. The
+ tradeoff is that the more XS types you define, the more CPU time the case will take to run.
+
+ Representing xsType by a single letter (A-Z) or number (0-9) limits users to 36 groups. So ARMI
+ will allow 2-letter xsType designations if and only if the ``buGroups`` setting has length 1
+ (i.e. no burnup groups are defined). This is useful for high-fidelity XS modeling, as sketched
+ below.
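+
+ For instance, a sketch of two-letter XS types in an assembly definition (letters illustrative)::
+
+     xs types: [AA, AB, AC, AD, AE, AF]
+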
axial mesh points
- Blocks will be broken up into this many uniform mesh points in the
- deterministic neutronics solvers (e.g. DIF3D). This allows you to define
- large blocks that have multiple flux points within them. You have to keep the
- neutronic mesh somewhat uniform in order to maintain numerical stability of
- the solvers. It is important to note that the axial mesh must be uniform
- throughout the core for many physics kernels, so be sure all block interfaces
- are consistent among all assemblies in the core. Blocks deplete and get most
- state variables on the block mesh defined by the height specification.
- Provisions for multiple meshes for different physics are being planned.
+ Blocks will be broken up into this many uniform mesh points in the deterministic neutronics
+ solvers (e.g. DIF3D). This allows you to define large blocks that have multiple flux points within
+ them. You have to keep the neutronic mesh somewhat uniform in order to maintain numerical
+ stability of the solvers. It is important to note that the axial mesh must be uniform throughout
+ the core for many physics kernels, so be sure all block interfaces are consistent among all
+ assemblies in the core. Blocks deplete and get most state variables on the block mesh defined by
+ the height specification. Provisions for multiple meshes for different physics are being planned.
hotChannelFactors
A label to define which set of hot channel factors (HCFs) get applied to
@@ -801,12 +812,12 @@ material modifications
These are a variety of modifications that are made to the
materials in blocks in these locations. It may include the fuel enrichment (mass frac.), poison
enrichment (mass frac.), zirconium mass frac, and any additional options required to fully define
- the material loaded in the component. The material definitions in the material library define
+ the material loaded in the component. The material definitions in the material library define
valid modifications for them.
.. exec::
from armi.materials import Material
- from tabulate import tabulate
+ from armi.utils.tabulate import tabulate
data = []
for m in Material.__subclasses__():
@@ -835,8 +846,8 @@ material modifications
data.sort(key=lambda t: t[0])
return tabulate(
headers=("Material Name", "Available Modifications"),
- tabular_data=data,
- tablefmt="rst",
+ data=data,
+ tableFmt="rst",
)
The class 1/class 2 modifications in fuel materials are used to identify mixtures of
@@ -1033,6 +1044,15 @@ Example grid definitions are shown below::
.. tip:: We have gone through some effort to allow both pin and core grid definitions to share this
input and it may improve in the future.
+You may set up some kinds of grids (e.g. 1/3 and full core hex or Cartesian core
+loadings) using our interactive graphical grid editor described more in
+:py:mod:`armi.utils.gridEditor`.
+
+.. figure:: /.static/gridEditor.png
+ :align: center
+
+ An example of the Grid Editor being used on a FFTF input file
+
.. _custom-isotopics:
Custom Isotopics
@@ -1062,6 +1082,30 @@ nuclear data library).
The (mass) ``density`` input is invalid when specifying ``number densities``; the code will present an error message.
+Material density may be specified in custom isotopics either explicitly in a ``mass fractions`` input
+format (shown above) or implicitly with ``number densities``. This is fairly straightforward for the
+``Custom`` material, as it has no baseline density. Density may also be specified for components using
+materials which have entries in the materials library. Users should be aware of the following interactions
+when specifying a custom density for components using a library material:
+
+ 1. The library material density will not be changed. Only the component(s) with the custom isotopics
+ entry will have the density modification.
+
+ 2. Density specified by custom isotopics will override all other density modifications in the component
+ construction phase (e.g. ``TD_frac`` entries).
+
+    3. Only the component density is changed; no other material properties are altered to account
+       for the change in composition/density.
+
+    4. Density can only be specified using custom isotopics for non-``Custom`` materials that have some
+ initial density. Don't try to make ``Void`` have mass!
+
+Densities specified using ``Custom Isotopics`` are applied in component construction, and should be specified
+at the input temperature for the component. Note that when overriding the density of a library material, all
+other properties of that material (e.g. expansion coefficients) will continue to be used as if the component
+consisted of the library material. In other words, ARMI will still think the component is made out of the
+original material!
+
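+A minimal sketch of inspecting this behavior, assuming a loaded reactor ``r`` and the ``density``
+accessors shown here::
+
+    from armi.reactor.flags import Flags
+
+    b = r.core.getFirstBlock(Flags.FUEL)
+    fuel = b.getComponent(Flags.FUEL)
+    # the component reflects the custom isotopics density...
+    print(fuel.density())
+    # ...while the library material keeps its own reference density
+    print(fuel.material.density(Tc=fuel.temperatureInC))
+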
Advanced topics
---------------
@@ -1272,8 +1316,8 @@ Nuclide Flags
The ``nuclide flags`` setting allows the user to choose which nuclides they
would like to consider in the problem, and whether or not each nuclide should
transmute and decay. For example, sometimes you may not want to deplete trace
-elements in structural materials, but in other analysis you might. If the
-nuclide should deplete, it must have ``burn: true``. If it is to be included
+elements in structural materials, but in other analyses you might. If the
+nuclide should deplete, it must have ``burn: true``. If it is to be included
in the problem at all, it must have ``xs: true``. All nuclides that will be
produced via transmutation/decay must also have ``burn: true``, so if you add
Thorium, make sure to add all other actinides in its chain. You can use the
@@ -1315,7 +1359,7 @@ deplete.::
U238: {burn: true, xs: true}
The code will crash if materials used in :ref:`blocks-and-components` contain nuclides not defined in
-``nuclide flags``. A failure can also occur if the burn chain is missing a nuclide.
+``nuclide flags``. A failure can also occur if the burn chain is missing a nuclide.
.. tip::
We plan to upgrade the default behavior of this to inherit from all defined materials
@@ -1457,16 +1501,16 @@ Fuel Management Tips
Some mistakes are common. Follow these tips.
* Always make sure your assembly-level types in the settings file are up to date
- with the grids in your bluepints file. Otherwise you'll be moving feeds when you
- want to move igniters, or something.
+ with the grids in your blueprints file. Otherwise you'll be moving feeds when you
+ mean to move igniters.
* Use the exclusions list! If you move a cascade and then the next cascade tries
- to run, it will choose your newly-moved assemblies if they fit your criteria in
- ``findAssemblies``. This leads to very confusing results. Therefore, once you move
- assemblies, you should default to adding them to the exclusions list.
+ to run, it will choose your newly-moved assemblies if they fit your criteria in
+ ``findAssemblies``. This leads to very confusing results. Therefore, once you move
+ assemblies, you should default to adding them to the exclusions list.
* Print cascades during debugging. After you've built a cascade to swap, print it
- out and check the locations and types of each assembly in it. Is it what you want?
+ out and check the locations and types of each assembly in it. Is it what you want? (A sketch
+ follows this list.)
* Watch ``typeNum`` in the database. You can get good intuition about what is
- getting moved by viewing this parameter.
+ getting moved by viewing this parameter.
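+
+A minimal sketch of that cascade check, assuming ``cascade`` is a list of assembly objects built
+by your fuel handler::
+
+    for a in cascade:
+        print(a.getLocation(), a.getType())
+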
Running a branch search
-----------------------
with *keff* closest to the setting, while still being above 1.0, is chosen.
Settings Report
===============
-This document lists all the `settings <#the-settings-input-file>`_ in ARMI.
+This document lists all the `settings <#the-settings-input-file>`_ in ARMI.
They are all accessible to developers
through the :py:class:`armi.settings.caseSettings.Settings` object, which is typically
@@ -1545,6 +1589,7 @@ through ``self.cs``.
.. exec::
from armi import settings
import textwrap
+ from dochelpers import escapeSpecialCharacters
def looks_like_path(s):
"""Super quick, not robust, check if a string looks like a file path."""
@@ -1563,7 +1608,8 @@ through ``self.cs``.
for setting in sorted(cs.values(), key=lambda s: s.name):
content += ' * - {}\n'.format(' '.join(wrapper.wrap(setting.name)))
- content += ' - {}\n'.format(' '.join(wrapper.wrap(str(setting.description) or '')))
+ description = escapeSpecialCharacters(str(setting.description) or "")
+ content += " - {}\n".format(" ".join(wrapper.wrap(description)))
default = str(getattr(setting, 'default', None)).split("/")[-1]
options = str(getattr(setting,'options','') or '')
if looks_like_path(default):
diff --git a/doc/user/manual_data_access.rst b/doc/user/manual_data_access.rst
index 3a322f308..34d1912c8 100644
--- a/doc/user/manual_data_access.rst
+++ b/doc/user/manual_data_access.rst
@@ -25,7 +25,8 @@ Accessing Some Interesting Info
Often times, you may be interested in the geometric dimensions of various blocks. These are stored on the
:py:mod:`components `, and may be accessed as follows::
- b = o.r.getFirstBlock(Flags.FUEL)
+ b = r.core.getFirstBlock(Flags.FUEL)
+ # Depending on how the reactor was loaded, this may need to be ``o.r``.
fuel = b.getComponent(Flags.FUEL)
od = fuel.getDimension('od',cold=True) # fuel outer diameter in cm
odHot = fuel.getDimension('od') # hot dimension
diff --git a/doc/user/outputs.rst b/doc/user/outputs.rst
index 8267fa93c..24ec598b9 100644
--- a/doc/user/outputs.rst
+++ b/doc/user/outputs.rst
@@ -248,5 +248,5 @@ still being able to faithfully reconstruct the original data. To accomplish this
HDF5 dataset attributes to indicate when some manipulation is necessary. Writing
such special data to the HDF5 file and reading it back again is accomplished with the
:py:func:`armi.bookkeeping.db.database3.packSpecialData` and
-:py:func:`armi.bookkeeping.db.database3.packSpecialData`. Refer to their implementations
+:py:func:`armi.bookkeeping.db.database3.unpackSpecialData`. Refer to their implementations
and documentation for more details.
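+
+The general pattern is to store the manipulated data in a dataset and record how to undo the
+manipulation in its attributes. Here is a sketch of the idea with plain ``h5py`` (an illustration
+only, not ARMI's actual implementation)::
+
+    import h5py
+    import numpy as np
+
+    # jagged per-block data cannot be stored directly, so pad it and
+    # record the original row lengths as dataset attributes
+    rows = [np.array([1.0, 2.0]), np.array([3.0])]
+    lengths = [len(row) for row in rows]
+    padded = np.zeros((len(rows), max(lengths)))
+    for i, row in enumerate(rows):
+        padded[i, : len(row)] = row
+
+    with h5py.File("example.h5", "w") as f:
+        ds = f.create_dataset("myParam", data=padded)
+        ds.attrs["jagged"] = True
+        ds.attrs["lengths"] = lengths
+
+    # reading it back, the attributes say how to rebuild the original rows
+    with h5py.File("example.h5", "r") as f:
+        ds = f["myParam"]
+        rebuilt = [ds[i, :n] for i, n in enumerate(ds.attrs["lengths"])]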
diff --git a/doc/user/physics_coupling.rst b/doc/user/physics_coupling.rst
index 15872a385..b9d29db93 100644
--- a/doc/user/physics_coupling.rst
+++ b/doc/user/physics_coupling.rst
@@ -76,3 +76,12 @@ The total convergence of the power distribution is finally measured through the
.. math::
\epsilon = \| \xi \|_{\infty} = \max \xi.
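+
+In code this is just the largest entry of :math:`\xi`. A minimal sketch with NumPy, using assumed
+example data for :math:`\xi` as the elementwise relative change in the power distribution::
+
+    import numpy as np
+
+    # assumed example data: power distributions from two coupling iterations
+    powerOld = np.array([1.00, 2.00, 3.00])
+    powerNew = np.array([1.01, 1.98, 3.03])
+
+    xi = np.abs((powerNew - powerOld) / powerOld)
+    eps = np.max(xi)  # the infinity norm used as the convergence measure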
+
+
+The Global Flux Interface
+-------------------------
+The :py:class:`Global Flux Interface `
+class will attempt to set its own ``TightCoupler`` based on ``keff``. For the specifics, see
+:py:meth:`_setTightCouplingDefaults `.
+If you want to change the tight coupling behavior of the ``GlobalFluxInterface``, the easiest
+approach is to subclass the interface and override the ``_setTightCouplingDefaults`` method.
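+
+For example (a sketch only; the import path and the no-argument method signature are assumptions
+here)::
+
+    from armi.physics.neutronics.globalFlux.globalFluxInterface import (
+        GlobalFluxInterface,
+    )
+
+    class MyGlobalFluxInterface(GlobalFluxInterface):
+        def _setTightCouplingDefaults(self):
+            """Skip the keff-based defaults; configure coupling manually here."""
+            pass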
diff --git a/doc/user/reactor_parameters_report.rst b/doc/user/reactor_parameters_report.rst
index fe2dd2dea..e8c7550f7 100644
--- a/doc/user/reactor_parameters_report.rst
+++ b/doc/user/reactor_parameters_report.rst
@@ -4,7 +4,8 @@
Reactor Parameters
******************
-This document lists all of the Reactor Parameters that are provided by the ARMI Framework.
+This document lists all of the :py:mod:`Reactor Parameters ` that are provided by the
+ARMI Framework. See :py:mod:`armi.reactor.parameters` for usage details.
.. exec::
from armi.reactor import reactors
diff --git a/doc/user/user_install.rst b/doc/user/user_install.rst
index 6a454b58f..e45a45d15 100644
--- a/doc/user/user_install.rst
+++ b/doc/user/user_install.rst
@@ -12,7 +12,7 @@ particular, we assume familiarity with `Python `__,
You must have the following installed before proceeding:
-* `Python `__ version 3.7 or newer (preferably 64-bit).
+* `Python `__ version 3.9 or newer.
.. admonition:: The right Python command
diff --git a/pyproject.toml b/pyproject.toml
index a766d4902..069d04d41 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -21,26 +21,29 @@ build-backend = "setuptools.build_meta"
[project]
name = "armi"
-version = "0.3.0"
+version = "0.4.0"
description = "An open-source nuclear reactor analysis automation framework that helps design teams increase efficiency and quality."
license = {file = "LICENSE.md"}
-requires-python = ">3.6"
+requires-python = ">3.8"
readme = "README.rst"
authors = [
{ name="TerraPower, LLC", email="armi-devs@terrapower.com" },
]
dependencies = [
"coverage>=7.2.0", # Code coverage tool. Sadly baked into every Case.
- "h5py>=3.0,<=3.9", # Needed because our database files are H5 format
+ "h5py>=3.9,<3.12 ; python_version >= '3.11.0'", # Needed because our database files are H5 format
+ "h5py>=3.0,<=3.9 ; python_version < '3.11.0'",
"htmltree>=0.7.6", # Our reports have HTML output
- "matplotlib>=3.5.3", # Important plotting library
- "numpy>=1.21,<=1.23.5", # Important math library
+ "matplotlib>=3.5.3,<3.8.0", # Important plotting library
+ "numpy>=1.21", # Important math library
"ordered-set>=3.1.1", # A useful data structure
"pluggy>=1.2.0", # Central tool behind the ARMI Plugin system
"pyDOE>=0.3.8", # We import a Latin-hypercube algorithm to explore a phase space
"pyevtk>=1.2.0", # Handles binary VTK visualization files
- "ruamel.yaml.clib<=0.2.7", # C-based core of ruamel below
- "ruamel.yaml<=0.17.21", # Our foundational YAML library
+ "ruamel.yaml.clib ; python_version >= '3.11.0'", # C-based core of ruamel below
+ "ruamel.yaml ; python_version >= '3.11.0'", # Our foundational YAML library
+ "ruamel.yaml.clib<=0.2.7 ; python_version < '3.11.0'", # C-based core of ruamel below
+ "ruamel.yaml<=0.17.21 ; python_version < '3.11.0'", # Our foundational YAML library
"scipy>=1.7.0", # Used for curve-fitting and matrix math
"tabulate>=0.8.9", # Used to pretty-print tabular data
"toml>0.9.5", # Needed to parse the pyproject.toml file
@@ -55,11 +58,10 @@ classifiers = [
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
- "Programming Language :: Python :: 3.7",
- "Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
"Topic :: Scientific/Engineering :: Information Analysis",
]
@@ -82,7 +84,7 @@ test = [
"pytest>=7.0.0", # Our primary test tooling
"pytest-cov>=4.0.0", # coverage plugin
"pytest-xdist>=3.0.0", # To spread our tests over multiple CPUs
- "ruff==0.0.272", # Linting and code formatting (version-pinned)
+ "ruff==0.5.1", # Linting and code formatting (version-pinned)
]
docs = [
#######################################################################
@@ -125,14 +127,42 @@ find = {}
#######################################################################
[tool.ruff]
# This is the exact version of Ruff we use.
-required-version = "0.0.272"
+required-version = "0.5.1"
# Assume Python 3.9
target-version = "py39"
-# Setting line-length to 140 (though blacks default is 88)
-line-length = 140
+# Setting line-length to 120 (though black's default is 88)
+line-length = 120
+# Exclude a variety of commonly ignored directories.
+exclude = [
+ ".bzr",
+ ".direnv",
+ ".eggs",
+ ".git",
+ ".git-rewrite",
+ ".hg",
+ ".mypy_cache",
+ ".nox",
+ ".pants.d",
+ ".pytype",
+ ".ruff_cache",
+ ".svn",
+ ".tox",
+ ".venv",
+ "__pycache__",
+ "__pypackages__",
+ "_build",
+ "buck-out",
+ "build",
+ "dist",
+ "doc/tutorials/armi-example-app",
+ "node_modules",
+ "venv",
+]
+
+[tool.ruff.lint]
# Enable pycodestyle (E) and Pyflakes (F) codes by default.
# D - NumPy docstring rules
# N801 - Class name should use CapWords convention
@@ -167,33 +197,7 @@ select = ["E", "F", "D", "N801", "SIM", "TID"]
#
ignore = ["D100", "D101", "D102", "D103", "D105", "D106", "D205", "D401", "D404", "E731", "RUF100", "SIM102", "SIM105", "SIM108", "SIM114", "SIM115", "SIM117", "SIM118"]
-# Exclude a variety of commonly ignored directories.
-exclude = [
- ".bzr",
- ".direnv",
- ".eggs",
- ".git",
- ".git-rewrite",
- ".hg",
- ".mypy_cache",
- ".nox",
- ".pants.d",
- ".pytype",
- ".ruff_cache",
- ".svn",
- ".tox",
- ".venv",
- "__pycache__",
- "__pypackages__",
- "_build",
- "buck-out",
- "build",
- "dist",
- "node_modules",
- "venv",
-]
-
-[tool.ruff.per-file-ignores]
+[tool.ruff.lint.per-file-ignores]
# D1XX - enforces writing docstrings
# E741 - ambiguous variable name
# N - We have our own naming conventions for unit tests.
@@ -201,10 +205,10 @@ exclude = [
"*/tests/*" = ["D1", "E741", "N", "SLF001"]
"doc/gallery-src/*" = ["D400"]
-[tool.ruff.flake8-tidy-imports]
+[tool.ruff.lint.flake8-tidy-imports]
ban-relative-imports = "all"
-[tool.ruff.pydocstyle]
+[tool.ruff.lint.pydocstyle]
convention = "numpy"
@@ -219,6 +223,48 @@ filterwarnings = [
"ignore: the matrix subclass is not the recommended way:PendingDeprecationWarning",
]
+[tool.coverage.run]
+omit = [
+ "armi/cli/gridGui.py",
+ "armi/utils/gridEditor.py",
+ "armi/utils/tests/test_gridGui.py",
+ "venv/",
+ ]
+source = ["armi"]
+parallel = true
+# Change default .coverage file to something that doesn't have a dot
+# because some Windows services can't handle dots.
+data_file = "coverage_results.cov"
+
+[tool.coverage.report]
+# Files to omit from the coverage report
+omit = [
+ "armi/cli/gridGui.py",
+ "armi/utils/gridEditor.py",
+ "*/tests/*",
+ ]
+
+# Regexes for lines to exclude from consideration
+exclude_also = [
+ # Don't complain about missing debug-only code:
+ "def __repr__",
+ "if self\\.debug",
+
+ # Don't complain about missing type checking-only code:
+ "if TYPE_CHECKING",
+
+ # Don't complain if tests don't hit defensive assertion code:
+ "raise AssertionError",
+ "raise KeyboardInterrupt",
+ "raise NotImplementedError",
+ "except ImportError",
+ "pass",
+
+ # Don't complain if non-runnable code isn't run:
+ "if __name__ == .__main__.:",
+ ]
+
+ignore_errors = true
+
#######################################################################
# DATA FILES TO BE INCLUDED WITH THE PROJECT #
@@ -265,7 +311,6 @@ armi = [
"physics/neutronics/tests/ISOXA",
"physics/neutronics/tests/rzmflxYA",
"resources/*",
- "resources/**/*",
"tests/1DslabXSByCompTest.yaml",
"tests/armiRun-SHUFFLES.txt",
"tests/armiRun.yaml",
@@ -277,14 +322,15 @@ armi = [
"tests/geom.xml",
"tests/geom1Assem.xml",
"tests/ISOAA",
- "tests/refOneBlockReactor.yaml",
"tests/refSmallCartesian.yaml",
"tests/refSmallCoreGrid.yaml",
"tests/refSmallReactor.yaml",
"tests/refSmallReactorBase.yaml",
"tests/refSmallSfpGrid.yaml",
"tests/refTestCartesian.yaml",
- "tests/sfpGeom.yaml",
+ "tests/smallestTestReactor/armiRunSmallest.yaml",
+ "tests/smallestTestReactor/refOneBlockReactor.yaml",
+ "tests/smallestTestReactor/refSmallestReactor.yaml",
"tests/ThRZGeom.xml",
"tests/ThRZloading.yaml",
"tests/ThRZSettings.yaml",
diff --git a/tox.ini b/tox.ini
deleted file mode 100644
index c7e4a6693..000000000
--- a/tox.ini
+++ /dev/null
@@ -1,87 +0,0 @@
-[tox]
-envlist = py38,lint,cov
-requires =
- pip >= 20.2
-
-[testenv]
-basepython = {env:PYTHON3_PATH:python3}
-setenv =
- PYTHONPATH = {toxinidir}
- USERNAME = armi
-
-[testenv:test]
-commands =
- pip install -e .[memprof,mpi,test]
- pytest -n 4 armi
-
-[testenv:doc]
-allowlist_externals =
- /usr/bin/git
- /usr/bin/make
-changedir = doc
-commands =
- pip install -e ..[memprof,mpi,test,docs]
- git submodule init
- git submodule update
- make html
-
-# First, run code coverage over the rest of the usual unit tests.
-[testenv:cov1]
-deps=
- mpi4py
-allowlist_externals =
- /usr/bin/mpiexec
-commands =
- pip install -e .[memprof,mpi,test]
- coverage run --rcfile=.coveragerc -m pytest -n 4 --cov=armi --cov-config=.coveragerc --cov-report=lcov --ignore=venv armi
-
-# Second, run code coverage over the unit tests that run MPI library code, and combine the coverage results together.
-[testenv:cov2]
-deps=
- mpi4py
-allowlist_externals =
- /usr/bin/mpiexec
-commands =
- pip install -e .[memprof,mpi,test]
- mpiexec -n 2 --use-hwthread-cpus coverage run --rcfile=.coveragerc -m pytest --cov=armi --cov-config=.coveragerc --cov-report=lcov --cov-append --ignore=venv armi/tests/test_mpiFeatures.py
- mpiexec -n 2 --use-hwthread-cpus coverage run --rcfile=.coveragerc -m pytest --cov=armi --cov-config=.coveragerc --cov-report=lcov --cov-append --ignore=venv armi/tests/test_mpiParameters.py
- coverage combine --rcfile=.coveragerc --keep -a
-
-# NOTE: This only runs the MPI unit tests.
-# NOTE: This will only work in POSIX/BASH Linux.
-[testenv:mpitest]
-deps=
- mpi4py
-allowlist_externals =
- /usr/bin/mpiexec
-commands =
- pip install -e .[memprof,mpi,test]
- mpiexec -n 2 --use-hwthread-cpus pytest armi/tests/test_mpiFeatures.py
- mpiexec -n 2 --use-hwthread-cpus pytest armi/tests/test_mpiParameters.py
-
-[testenv:lint]
-deps=
- ruff==0.0.272
-commands =
- ruff .
-
-[testenv:report]
-skip_install = true
-deps=
- mpi4py
-commands =
- coverage report
- coverage html
-
-[testenv:manifest]
-basepython = {env:PYTHON3_PATH:python3}
-setenv =
- PYTHONPATH = {toxinidir}
- USERNAME = armi
-commands =
- python .github/workflows/validatemanifest.py
-
-[testenv:clean]
-deps = coverage
-skip_install = true
-commands = coverage erase