Skip to content

Commit

Permalink
Merge pull request #149 from abachma2/optimization
Browse files Browse the repository at this point in the history
Single optimization for once through scenarios
  • Loading branch information
nsryan2 authored Mar 28, 2023
2 parents d9625ce + 33a8b37 commit 11975e8
Show file tree
Hide file tree
Showing 14 changed files with 1,080 additions and 3 deletions.
49 changes: 49 additions & 0 deletions input/haleu/optimization/once-through/min_haleu.in
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
# Optimization of Scenario 7 for minimizing HALEU

environment
tabular_data
tabular_data_file = './min_haleu.dat'
output_file = './min_haleu.out'
write_restart = './min_haleu.rst'

method
soga
max_function_evaluations = 1000
population_size = 50
mutation_type = replace_uniform
mutation_rate = 0.116
crossover_type = shuffle_random
crossover_rate = 0.584
convergence_type = best_fitness_tracker
fitness_type = merit_function
constraint_penalty = 1.109

model
single

variables
active all
discrete_design_range = 4
lower_bounds = 0 0 0 0
upper_bounds = 100 100 100 50
descriptors = 'mmr_share' 'xe100_share' 'voygr_share' 'lwr'
discrete_design_set
integer = 2
elements_per_variable = 7 8
elements = 41 62 74 78 82 86 90 28 56 84 112 140 151 168 185
descriptors = 'mmr_burnup' 'xe100_burnup'
linear_equality_constraint_matrix = 1 1 1 0 0 0
linear_equality_targets = 100

interface
fork
asynchronous
evaluation_concurrency = 48
analysis_drivers = 'python min_haleu_driver.py'

responses
objective_functions = 1
sense = 'min'
descriptors = 'haleu_swu'
no_gradients
no_hessians
92 changes: 92 additions & 0 deletions input/haleu/optimization/once-through/min_haleu_driver.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
"""Dakota analysis driver: minimize HALEU SWU for a once-through scenario.

Workflow (executed once per Dakota function evaluation):
  1. Read the Dakota parameters file for this evaluation.
  2. Render a Cyclus input file from the ``oncethrough_input.xml.in``
     template using the sampled variable values.
  3. Build DeployInst files for the LWR fleet and the advanced reactors.
  4. Run Cyclus on the rendered input.
  5. Report the HALEU separative work (SWU) objective back to Dakota and
     delete the (large) SQLite results file.
"""
import numpy as np
import subprocess
import dakota.interfacing as di
import sys
import os
sys.path.append('../../../../scripts')
import create_AR_DeployInst as cdi
import output_metrics as oup
import dakota_input as inp

# ----------------------------
# Parse Dakota parameters file
# ----------------------------

params, results = di.read_parameters_file()

# -------------------------------
# Convert and send to Cyclus
# -------------------------------

# Render the Cyclus input file from the template.  The scenario name
# encodes every design-variable value so each evaluation gets unique
# file names (required because evaluations run concurrently).
cyclus_template = 'oncethrough_input.xml.in'
scenario_name = ('lwr_' +
                 str(int(params['lwr'])) +
                 '_mmr_share_' +
                 str(int(params['mmr_share'])) +
                 '_xe100_share_' +
                 str(int(params['xe100_share'])) +
                 '_voygr_share_' +
                 str(int(params['voygr_share'])) +
                 '_mmr_burnup_' +
                 str(int(params['mmr_burnup'])) +
                 '_xe100_burnup_' +
                 str(int(params['xe100_burnup'])))
variable_dict = {'handle': scenario_name,
                 'lwr': str(int(params['lwr'])),
                 'mmr_share': str(int(params['mmr_share'])),
                 'xe100_share': str(int(params['xe100_share'])),
                 'voygr_share': str(int(params['voygr_share'])),
                 'mmr_burnup': str(int(params['mmr_burnup'])),
                 'xe100_burnup': str(int(params['xe100_burnup']))}
output_xml = './cyclus-files/' + scenario_name + '.xml'
output_sqlite = './cyclus-files/' + scenario_name + '.sqlite'
inp.render_input(cyclus_template, variable_dict, output_xml)

# Create DeployInst for LWRs: 'lwr' is the number of LWRs receiving a
# 20-year license extension.
DI_dict = cdi.write_lwr_deployinst(
    params['lwr'],
    "../../inputs/united_states/buildtimes/" +
    "UNITED_STATES_OF_AMERICA/deployinst.xml",
    "../../../../database/lwr_power_order.txt")
cdi.write_deployinst(DI_dict, './cyclus-files/' +
                     scenario_name +
                     '_deployinst.xml')

# Create DeployInst for advanced reactors.
duration = 1500  # simulation length (time steps)
# MMR lifetime (time steps) as a function of the sampled MMR burnup value.
mmr_lifetimes = {41: 120, 62: 180, 74: 218, 78: 231, 82: 240, 86: 255, 90: 267}
# prototype -> (power [MWe], lifetime [time steps])
reactor_prototypes = {'Xe-100': (76, 720),
                     'MMR': (5, mmr_lifetimes[int(params['mmr_burnup'])]),
                     'VOYGR': (73, 720)}
# Constant power demand (presumably MWe) starting at time step 721 —
# before that the legacy LWR fleet supplies demand.
demand_equation = np.zeros(duration)
demand_equation[721:] = 87198.156
lwr_DI = cdi.convert_xml_to_dict("./cyclus-files/" +
                                 scenario_name +
                                 '_deployinst.xml')
deploy_schedule = cdi.write_AR_deployinst(
    lwr_DI,
    "../../inputs/united_states/reactors/",
    duration,
    reactor_prototypes,
    demand_equation,
    {'MMR': int(params['mmr_share']),
     'Xe-100': int(params['xe100_share']),
     'VOYGR': int(params['voygr_share'])})
cdi.write_deployinst(deploy_schedule, "./cyclus-files/AR_DeployInst_" +
                     scenario_name +
                     ".xml")

# Run Cyclus with the rendered input file.
oup.run_cyclus(output_sqlite, output_xml)

# ----------------------------
# Return the results to Dakota
# ----------------------------
# SWU consumed by the HALEU-fueled reactors (Xe-100, MMR) after the
# transition begins at time step 721.
results['haleu_swu'].function = oup.calculate_swu(output_sqlite,
                                                 ['Xe-100', 'MMR'],
                                                 721)
results.write()

# Delete the large SQLite output once the metric has been extracted.
# os.remove avoids spawning a shell (portable, and raises on failure
# instead of silently ignoring it like os.system('rm ...') did).
os.remove(output_sqlite)
49 changes: 49 additions & 0 deletions input/haleu/optimization/once-through/min_waste.in
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
# Optimization of Scenario 7 for minimizing waste discharged

environment
tabular_data
tabular_data_file = './min_waste.dat'
output_file = './min_waste.out'
write_restart = './min_waste.rst'

method
soga
max_function_evaluations = 1000
population_size = 50
mutation_type = replace_uniform
mutation_rate = 0.116
crossover_type = shuffle_random
crossover_rate = 0.584
convergence_type = best_fitness_tracker
fitness_type = merit_function
constraint_penalty = 1.109

model
single

variables
active all
discrete_design_range = 4
lower_bounds = 0 0 0 0
upper_bounds = 100 100 100 50
descriptors = 'mmr_share' 'xe100_share' 'voygr_share' 'lwr'
discrete_design_set
integer = 2
elements_per_variable = 7 8
elements = 41 62 74 78 82 86 90 28 56 84 112 140 151 168 185
descriptors = 'mmr_burnup' 'xe100_burnup'
linear_equality_constraint_matrix = 1 1 1 0 0 0
linear_equality_targets = 100

interface
fork
asynchronous
evaluation_concurrency = 48
analysis_drivers = 'python min_waste_driver.py'

responses
objective_functions = 1
sense = 'min'
descriptors = 'waste'
no_gradients
no_hessians
95 changes: 95 additions & 0 deletions input/haleu/optimization/once-through/min_waste_driver.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
"""Dakota analysis driver: minimize waste discharged for a once-through scenario.

Workflow (executed once per Dakota function evaluation):
  1. Read the Dakota parameters file for this evaluation.
  2. Render a Cyclus input file from the ``oncethrough_input.xml.in``
     template using the sampled variable values.
  3. Build DeployInst files for the LWR fleet and the advanced reactors.
  4. Run Cyclus on the rendered input.
  5. Report the discharged-waste objective back to Dakota and delete the
     (large) SQLite results file.
"""
import numpy as np
import subprocess
import dakota.interfacing as di
import sys
import os
sys.path.append('../../../../scripts')
import create_AR_DeployInst as cdi
import output_metrics as oup
import dakota_input as inp

# ----------------------------
# Parse Dakota parameters file
# ----------------------------

params, results = di.read_parameters_file()

# -------------------------------
# Convert and send to Cyclus
# -------------------------------

# Render the Cyclus input file from the template.  The scenario name
# encodes every design-variable value so each evaluation gets unique
# file names (required because evaluations run concurrently).
cyclus_template = 'oncethrough_input.xml.in'
scenario_name = ('lwr_' +
                 str(int(params['lwr'])) +
                 '_mmr_share_' +
                 str(int(params['mmr_share'])) +
                 '_xe100_share_' +
                 str(int(params['xe100_share'])) +
                 '_voygr_share_' +
                 str(int(params['voygr_share'])) +
                 '_mmr_burnup_' +
                 str(int(params['mmr_burnup'])) +
                 '_xe100_burnup_' +
                 str(int(params['xe100_burnup'])))
variable_dict = {'handle': scenario_name,
                 'lwr': str(int(params['lwr'])),
                 'mmr_share': str(int(params['mmr_share'])),
                 'xe100_share': str(int(params['xe100_share'])),
                 'voygr_share': str(int(params['voygr_share'])),
                 'mmr_burnup': str(int(params['mmr_burnup'])),
                 'xe100_burnup': str(int(params['xe100_burnup']))}
output_xml = './cyclus-files/' + scenario_name + '.xml'
output_sqlite = './cyclus-files/' + scenario_name + '.sqlite'
inp.render_input(cyclus_template, variable_dict, output_xml)

# Create DeployInst for LWRs: 'lwr' is the number of LWRs receiving a
# 20-year license extension.
DI_dict = cdi.write_lwr_deployinst(
    params['lwr'],
    "../../inputs/united_states/buildtimes/" +
    "UNITED_STATES_OF_AMERICA/deployinst.xml",
    "../../../../database/lwr_power_order.txt")
cdi.write_deployinst(DI_dict, './cyclus-files/' +
                     scenario_name +
                     '_deployinst.xml')

# Create DeployInst for advanced reactors.
duration = 1500  # simulation length (time steps)
# MMR lifetime (time steps) as a function of the sampled MMR burnup value.
mmr_lifetimes = {41: 120, 62: 180, 74: 218, 78: 231, 82: 240, 86: 255, 90: 267}
# prototype -> (power [MWe], lifetime [time steps])
reactor_prototypes = {'Xe-100': (76, 720),
                     'MMR': (5, mmr_lifetimes[int(params['mmr_burnup'])]),
                     'VOYGR': (73, 720)}
# Constant power demand (presumably MWe) starting at time step 721 —
# before that the legacy LWR fleet supplies demand.
demand_equation = np.zeros(duration)
demand_equation[721:] = 87198.156
lwr_DI = cdi.convert_xml_to_dict("./cyclus-files/" +
                                 scenario_name +
                                 '_deployinst.xml')
deploy_schedule = cdi.write_AR_deployinst(
    lwr_DI,
    "../../inputs/united_states/reactors/",
    duration,
    reactor_prototypes,
    demand_equation,
    {'MMR': int(params['mmr_share']),
     'Xe-100': int(params['xe100_share']),
     'VOYGR': int(params['voygr_share'])})
cdi.write_deployinst(deploy_schedule, "./cyclus-files/AR_DeployInst_" +
                     scenario_name +
                     ".xml")

# Run Cyclus with the rendered input file.
oup.run_cyclus(output_sqlite, output_xml)

# ----------------------------
# Return the results to Dakota
# ----------------------------
# Mass of spent fuel discharged by all three advanced-reactor types after
# the transition begins at time step 721, keyed by each prototype's spent
# fuel commodity name.
results['waste'].function = oup.get_waste_discharged(output_sqlite,
                                                     ['Xe-100', 'MMR', 'VOYGR'],
                                                     721,
                                                     {'MMR': 'spent_MMR_haleu',
                                                      'Xe-100': 'spent_xe100_haleu',
                                                      'VOYGR': 'spent_smr_fuel'})
results.write()

# Delete the large SQLite output once the metric has been extracted.
# os.remove avoids spawning a shell (portable, and raises on failure
# instead of silently ignoring it like os.system('rm ...') did).
os.remove(output_sqlite)
Loading

0 comments on commit 11975e8

Please sign in to comment.