-
Notifications
You must be signed in to change notification settings - Fork 12
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge pull request #147 from abachma2/global
Global Sensitivity Analysis
- Loading branch information
Showing
14 changed files
with
2,644 additions
and
0 deletions.
There are no files selected for viewing
1,336 changes: 1,336 additions & 0 deletions
1,336
input/haleu/sensitivity-analysis/global/Global_Analysis.ipynb
Large diffs are not rendered by default.
Oops, something went wrong.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,24 @@ | ||
# Global Sensitivity Analysis | ||
This directory contains files for performing global sensitivity | ||
analysis on the transition from US LWRs to advanced reactors. | ||
There are three subdirectories, one for varying the build share | ||
of each advanced reactor considered in this work. In each case | ||
(i.e., subdirectory) the transition start time, LWR lifetime, | ||
Xe-100 burnup, and MMR build share are varied, but the build share | ||
of only one advanced reactor is varied. The build share of only | ||
one advanced reactor is varied at a time to prevent unphysical | ||
combinations of advanced reactor build shares (e.g., 50% Xe-100, | ||
50% MMR, and 50% VOYGR). | ||
|
||
Each subdirectory contains two different Dakota input files: | ||
``list_sample.in`` and ``dakota.in``. The ``list_sample.in`` file | ||
treats the MMR burnup as a continuous variable and the Xe-100 | ||
burnup as a discrete variable, with more points than previously | ||
considered. This input file is run first, creating a ``.dat`` file | ||
with data for the output metrics for each combination of input | ||
parameters. This ``.dat`` file is then read by the ``dakota.in`` | ||
input file to create a surrogate model of the data that treats all | ||
of the variables as continuous. The surrogate models created by | ||
the ``dakota.in`` file are used for variance decomposition to | ||
calculate Sobol' indices. Two surrogate model fits are used: quadratic | ||
and gaussian. |
43 changes: 43 additions & 0 deletions
43
input/haleu/sensitivity-analysis/global/mmr_share/dakota.in
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,43 @@ | ||
# Global analysis for variations in MMR build share | ||
environment, | ||
tabular_data | ||
tabular_data_file = 'mmr_share_gaussian.dat' | ||
output_file = 'mmr_share_gaussian.out' | ||
write_restart = 'mmr_share_gaussian.rst' | ||
|
||
method, | ||
sampling | ||
sample_type = lhs | ||
variance_based_decomp | ||
samples = 4000 # 100*parameters*(response+2) | ||
seed = 112822 | ||
|
||
model | ||
id_model = 'SURR' | ||
surrogate global | ||
import_points_file = 'mmr_share_list.dat' annotated | ||
#quadratic | ||
gaussian_process surfpack | ||
metrics = "root_mean_squared" "sum_abs" "rsquared" | ||
|
||
variables, | ||
active all | ||
uniform_uncertain = 5 | ||
lower_bounds = 721 0 0 41 28 | ||
upper_bounds = 901 50 50 90 185 | ||
descriptors = 'ts' 'lwr' 'mmr_share' 'mmr_burnup' 'xe100_burnup' | ||
|
||
interface, | ||
fork | ||
asynchronous | ||
evaluation_concurrency = 36 | ||
analysis_drivers = 'python mmr_share_driver.py' | ||
|
||
|
||
responses, | ||
descriptors = 'enr_u' 'haleu' 'swu' 'haleu_swu' 'waste' 'feed' | ||
response_functions = 6 | ||
scalar_responses = 6 | ||
no_gradients | ||
no_hessians | ||
|
40 changes: 40 additions & 0 deletions
40
input/haleu/sensitivity-analysis/global/mmr_share/list_sample.in
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,40 @@ | ||
# Global analysis for variations in MMR build share | ||
environment, | ||
tabular_data | ||
tabular_data_file = 'mmr_share_list.dat' | ||
output_file = 'mmr_share_list.out' | ||
write_restart = 'mmr_share_list.rst' | ||
|
||
method, | ||
sampling | ||
sample_type = lhs | ||
samples = 4000 | ||
seed = 120622 | ||
|
||
|
||
variables, | ||
active all | ||
continuous_design = 4 | ||
lower_bounds = 721 0 0 41 | ||
upper_bounds = 901 50 50 90 | ||
descriptors = 'ts' 'lwr' 'mmr_share' 'mmr_burnup' | ||
discrete_design_set | ||
integer = 1 | ||
elements_per_variable = 16 | ||
elements = 28 32 48 56 64 72 84 96 112 120 128 140 151 160 168 185 | ||
descriptors = 'xe100_burnup' | ||
|
||
interface, | ||
fork | ||
asynchronous | ||
evaluation_concurrency = 44 | ||
analysis_drivers = 'python mmr_share_driver.py' | ||
|
||
|
||
responses, | ||
descriptors = 'enr_u' 'haleu' 'swu' 'haleu_swu' 'waste' 'feed' | ||
response_functions = 6 | ||
scalar_responses = 6 | ||
no_gradients | ||
no_hessians | ||
|
97 changes: 97 additions & 0 deletions
97
input/haleu/sensitivity-analysis/global/mmr_share/mmr_share_driver.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,97 @@ | ||
# Dakota "fork" interface driver for the MMR build-share global sensitivity
# analysis. Dakota runs this script once per sample point: it reads the
# sampled parameters, renders the Cyclus input files, builds the LWR and
# advanced-reactor DeployInsts, runs Cyclus, and returns the output metrics
# to Dakota via the results file.
import numpy as np
import subprocess
import dakota.interfacing as di
import sys
import os
sys.path.append('../../../../../scripts')
import create_AR_DeployInst as cdi
import output_metrics as oup
import dakota_input as inp
# ----------------------------
# Parse Dakota parameters file
# ----------------------------

params, results = di.read_parameters_file()

# -------------------------------
# Convert and send to Cyclus
# -------------------------------

# Edit Cyclus input file. The scenario name encodes every sampled parameter
# so each sample's files are uniquely named.
cyclus_template = 'mmr_share_input.xml.in'
scenario_name = ('ts_' + str(int(params['ts'])) +
                 '_lwr_' + str(int(params['lwr'])) +
                 '_mmr_share_' + str(int(params['mmr_share'])) +
                 '_xe100_burnup_' + str(int(params['xe100_burnup'])) +
                 '_mmr_burnup_' + str(int(params['mmr_burnup'])))
variable_dict = {'handle': scenario_name,
                 'ts': str(int(params['ts'])),
                 'lwr': str(int(params['lwr'])),
                 'mmr_share': str(int(params['mmr_share'])),
                 'mmr_burnup': str(int(params['mmr_burnup'])),
                 'xe100_burnup': str(int(params['xe100_burnup']))}
output_xml = "./cyclus-files/" + scenario_name + ".xml"
output_sqlite = './cyclus-files/' + scenario_name + '.sqlite'

# MMR core lifetime (time steps) derived from the sampled burnup.
# NOTE(review): 1331.73/15/30 appear to convert burnup to months via core
# mass and power -- confirm constants against the MMR reactor model.
mmr_lifetime = int(np.round(params['mmr_burnup'], 2) * 1331.73 / 15 / 30)
mmr_burnup_dict = {'mmr_lifetime': mmr_lifetime}
inp.render_input('../mmr_burnup_input.xml.in',
                 mmr_burnup_dict,
                 "./cyclus-files/mmr_" + str(int(params['mmr_burnup'])) + ".xml")

# Map each discrete Xe-100 burnup value to (assemblies per batch, cycle
# length). Hoist the dict lookup so it is done once instead of three times.
xe100_cycles = {28: (1, 7), 32: (1, 8), 48: (2, 6), 56: (2, 7), 64: (2, 8),
                72: (3, 6), 84: (3, 7), 96: (3, 8), 112: (4, 7), 128: (4, 8),
                120: (5, 6), 140: (5, 7), 160: (5, 8), 151: (6, 6), 168: (6, 7),
                185: (6, 8)}
xe100_n_assem, xe100_cycle = xe100_cycles[int(params['xe100_burnup'])]
xe100_burnup_dict = {'xe100_cycle': xe100_cycle,
                     'xe100_n_assem': xe100_n_assem,
                     # 1675.44 is split evenly across the assemblies in a batch
                     'xe100_assem': 1675.44 / xe100_n_assem
                     }
inp.render_input("../xe100_burnup_input.xml.in",
                 xe100_burnup_dict,
                 "./cyclus-files/xe100_" + str(int(params['xe100_burnup'])) +
                 ".xml")

inp.render_input(cyclus_template, variable_dict, output_xml)

# Create DeployInst for LWRs
DI_dict = cdi.write_lwr_deployinst(
    params['lwr'],
    "../../../inputs/united_states/buildtimes/" +
    "UNITED_STATES_OF_AMERICA/deployinst.xml",
    "../../../../../database/lwr_power_order.txt")
cdi.write_deployinst(DI_dict, './cyclus-files/' +
                     scenario_name +
                     '_deployinst.xml')

# Create DeployInst for advanced reactors. reactor_prototypes maps each
# prototype to (power, lifetime); the demand equation is flat from the
# transition start time onward.
duration = 1500
reactor_prototypes = {'Xe-100': (76, 720),
                      'MMR': (5, mmr_lifetime),
                      'VOYGR': (73, 720)}
demand_equation = np.zeros(duration)
demand_equation[int(params['ts']):] = 87198.156
lwr_DI = cdi.convert_xml_to_dict('./cyclus-files/' +
                                 scenario_name +
                                 '_deployinst.xml')
deploy_schedule = cdi.write_AR_deployinst(
    lwr_DI,
    "../../../inputs/united_states/reactors/",
    duration,
    reactor_prototypes,
    demand_equation,
    {'MMR': int(params['mmr_share'])})
cdi.write_deployinst(deploy_schedule, "./cyclus-files/AR_DeployInst_" +
                     scenario_name +
                     ".xml")

# Run Cyclus with edited input file
oup.run_cyclus(output_sqlite, output_xml)

# ----------------------------
# Return the results to Dakota
# ----------------------------
results = oup.get_all_results(results, output_sqlite)

# Delete the (large) sqlite output once metrics are extracted. os.remove
# replaces the original `os.system('rm ' + ...)`: no shell, portable, and
# safe if the path ever contains shell metacharacters.
os.remove(output_sqlite)
Oops, something went wrong.