Merge branch 'NREL:main' into main
softwareengineerprogrammer authored Oct 10, 2023
2 parents 46b15b4 + d821f87 commit 9499fe5
Showing 4 changed files with 80 additions and 45 deletions.
26 changes: 26 additions & 0 deletions src/geophires_x/GEOPHIRESv3.py
@@ -40,6 +40,32 @@ def main(enable_geophires_logging_config=True):
# write the outputs, if requested
model.outputs.PrintOutputs(model)

# write the outputs as JSON
import jsons, json
jsons.suppress_warnings(True)
JSONresrv = jsons.dumps(model.reserv.OutputParameterDict, indent=4, sort_keys=True, supress_warnings=True)
JSONwells = jsons.dumps(model.wellbores.OutputParameterDict, indent=4, sort_keys=True, supress_warnings=True)
JSONsurfaceplant = jsons.dumps(model.surfaceplant.OutputParameterDict, indent=4, sort_keys=True, supress_warnings=True)
JSONEconomics = jsons.dumps(model.economics.OutputParameterDict, indent=4, sort_keys=True, supress_warnings=True)
jsonMerged = {**json.loads(JSONresrv), **json.loads(JSONwells), **json.loads(JSONEconomics), **json.loads(JSONsurfaceplant)}
if model.economics.DoAddOnCalculations.value:
JSONAddons = jsons.dumps(model.addeconomics.OutputParameterDict, indent=4, sort_keys=True, supress_warnings=True)
jsonMerged = {**jsonMerged, **json.loads(JSONAddons)}
if model.economics.DoCCUSCalculations.value:
JSONCCUS = jsons.dumps(model.ccuseconomics.OutputParameterDict, indent=4, sort_keys=True, supress_warnings=True)
jsonMerged = {**jsonMerged, **json.loads(JSONCCUS)}
if model.economics.DoSDACGTCalculations.value:
JSONSDACGT = jsons.dumps(model.sdacgteconomics.OutputParameterDict, indent=4, sort_keys=True, supress_warnings=True)
jsonMerged = {**jsonMerged, **json.loads(JSONSDACGT)}

JSONoutputfile = "HDR.json"
if len(sys.argv) > 2:
JSONoutputfile = str(sys.argv[2])
segs = JSONoutputfile.split('.')
JSONoutputfile = segs[0] + '.json'
with open(JSONoutputfile, 'w', encoding='UTF-8') as f:
f.write(json.dumps(jsonMerged))

# if the user has asked for it, copy the output file to the screen
if model.outputs.printoutput:
outputfile = "HDR.out"
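The block added above serializes the OutputParameterDict of the reservoir, wellbore, surface plant, and economics objects (plus the optional add-on, CCUS, and S-DAC-GT economics) and merges them into a single JSON file, named either HDR.json or after the second command-line argument with its extension replaced by .json. Below is a minimal sketch of reading that merged file back; the file name matches the default used above, but the keys depend on each model's OutputParameterDict, so the loop simply prints whatever entries are present.

import json

# Read the merged output written by the new block in GEOPHIRESv3.py.
# 'HDR.json' is the default file name used above.
with open('HDR.json', 'r', encoding='UTF-8') as f:
    merged = json.load(f)

# Each key is an output parameter name; the value is its serialized form.
for name, value in merged.items():
    print(name, value)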
1 change: 1 addition & 0 deletions src/geophires_x/LHSReservoir.py
@@ -10,6 +10,7 @@
import geophires_x.Model as Model
from .Reservoir import Reservoir


class LHSReservoir(Reservoir):
"""
This class models the Linear Heat Sweep Reservoir.
93 changes: 50 additions & 43 deletions src/geophires_x/MC_GeoPHIRES3.py
@@ -9,7 +9,6 @@
@author: Malcolm Ross V3
"""
# TODO Use this video to update this function https://www.youtube.com/watch?v=fKl2JW_qrso
# TODO Remove this line: logger.warn("space found in line " + str(result_count))

import os
import sys
@@ -20,8 +19,8 @@
import argparse
import uuid
import shutil
import concurrent.futures
import subprocess
import multiprocessing


def CheckAndReplaceMean(input_value, args) -> list:
@@ -51,11 +50,20 @@ def CheckAndReplaceMean(input_value, args) -> list:
i = i + 1
return input_value

def WorkPackage(Job_ID, Inputs, Outputs, args, Outputfile, working_dir, PythonPath: str):

def WorkPackage(pass_list):
Inputs = pass_list[0]
Outputs = pass_list[1]
args = pass_list[2]
Outputfile = pass_list[3]
working_dir = pass_list[4]
PythonPath = pass_list[5]

tmpoutputfile = tmpfilename = ""
#get random values for each of the INPUTS based on the distributions and boundary values
# get random values for each of the INPUTS based on the distributions and boundary values
rando = 0.0
s = ""
print("#", end="")
for input_value in Inputs:
if input_value[1].strip().startswith('normal'):
rando = np.random.normal(float(input_value[2]), float(input_value[3]))
@@ -70,7 +78,7 @@ def WorkPackage(Job_ID, Inputs, Outputs, args, Outputfile, working_dir, PythonPa
rando = np.random.lognormal(float(input_value[2]), float(input_value[3]))
s = s + input_value[0] + ", " + str(rando) + os.linesep
if input_value[1].strip().startswith('binomial'):
rando = np.random.binomial(float(input_value[2]), float(input_value[3]))
rando = np.random.binomial(int(input_value[2]), float(input_value[3]))
s = s + input_value[0] + ", " + str(rando) + os.linesep

# make up a temporary file name that will be shared among files for this iteration
@@ -85,7 +93,8 @@ def WorkPackage(Job_ID, Inputs, Outputs, args, Outputfile, working_dir, PythonPa
with open(tmpfilename, "a") as f:
f.write(s)

#start GeoPHIRES/HIP-RA with that input file. Capture the output into a filename that is the same as the input file but has the suffix "_result.txt".
# start the passed in program name (usually GeoPHIRES or HIP-RA) with the supplied input file.
# Capture the output into a filename that is the same as the input file but has the suffix "_result.txt".
sprocess = subprocess.Popen([PythonPath, args.Code_File, tmpfilename, tmpoutputfile], stdout=subprocess.DEVNULL)
sprocess.wait()

@@ -99,20 +108,20 @@ def WorkPackage(Job_ID, Inputs, Outputs, args, Outputfile, working_dir, PythonPa
# make sure a key file exists. If not, exit
if not os.path.exists(tmpoutputfile):
print("Timed out waiting for: " + tmpoutputfile)
logger.warning("Timed out waiting for: " + tmpoutputfile)
# logger.warning("Timed out waiting for: " + tmpoutputfile)
exit(-33)

with open(tmpoutputfile, "r") as f:
s1=f.readline()
i=0
while s1:
for out in localOutputs:
if out in s1:
localOutputs.remove(out)
s2 = s.split(":")
s2 = s2[1].strip()
s2 = s2.split(" ")
s2 = s2[0]
while s1: # read until the end of the file
for out in localOutputs: # check for each requested output
if out in s1: # If true, we found the output value that the user requested, so process it
localOutputs.remove(out) # as an optimization, drop the output from the list once we have found it
s2 = s1.split(":") # colon marks the split between the title and the data
s2 = s2[1].strip() # remove leading and trailing spaces
s2 = s2.split(" ") # split on space because there is a unit string after the value we are looking for
s2 = s2[0].strip() # we finally have the result we were looking for
result_s = result_s + s2 + ", "
i = i + 1
if i < (len(Outputs) - 1):
@@ -122,6 +131,10 @@ def WorkPackage(Job_ID, Inputs, Outputs, args, Outputfile, working_dir, PythonPa
break
s1 = f.readline()

# append the input values to the output values so the optimal input values are easy to find,
# in the form "inputVar:Rando;nextInputVar:Rando..."
result_s = result_s + "(" + s.replace(os.linesep, ";", -1).replace(", ", ":", -1) + ")"

# delete temporary files
os.remove(tmpfilename)
os.remove(tmpoutputfile)
@@ -181,9 +194,6 @@ def main(enable_geophires_logging_config=True):
parser.add_argument("MC_Settings_file", help="MC Settings file")
args = parser.parse_args()

# Set up a unique Job_ID
Job_ID = str(uuid.uuid4())

# make a list of the INPUTS, distribution functions, and the inputs for that distribution function.
# Make a list of the OUTPUTs
# Find the iteration value
@@ -228,27 +238,27 @@ def main(enable_geophires_logging_config=True):
s = "".join(s.rsplit(" ", 1)) # get rid of last space
s = "".join(s.rsplit(",", 1)) # get rid of last comma
s = s + os.linesep

# write the header so it is easy to import and analyze in Excel
with open(Outputfile, "w") as f:
f.write(s)

# loop through the specified number of iterations
procs = []
print("Starting Iteration:", end='')
for i in range(1, Iterations + 1):
print(str(i), end=',')
proc = multiprocessing.Process(target=WorkPackage, args=(Job_ID, Inputs, Outputs, args, Outputfile, working_dir, PythonPath))
procs.append(proc)
proc.start()
# build the args list
pass_list = [Inputs, Outputs, args, Outputfile, working_dir, PythonPath] # this list never changes

args = []
for i in range(0, Iterations):
args.append(pass_list) # we need to make Iterations number of copies of this list for the map
args = tuple(args) # convert to a tuple

# complete the processes
for proc in procs:
proc.join()
# Now run the executor with the map - that will run it Iterations number of times
with concurrent.futures.ProcessPoolExecutor() as executor:
executor.map(WorkPackage, args)

print (os.linesep + "Done with calculations! Summarizing..." + os.linesep)
print(os.linesep + "Done with calculations! Summarizing..." + os.linesep)
logger.info("Done with calculations! Summarizing...")

# read the results into an array
actual_records_count = Iterations
with open(Outputfile, "r") as f:
s = f.readline() # skip the first line
all_results = f.readlines()
@@ -260,11 +270,10 @@ def main(enable_geophires_logging_config=True):
if "-9999.0" not in line and len(s) > 1:
line = line.strip()
if len(line) > 3:
line, sep, tail = line.partition(', (') # strip off the Input Variable Values
Results.append([float(y) for y in line.split(",")])
else:
logger.warn("space found in line " + str(result_count))
else:
logger.warn("-9999.0 or space found in line " + str(result_count))
logger.warning("-9999.0 or space found in line " + str(result_count))

actual_records_count = len(Results)

@@ -275,22 +284,20 @@ def main(enable_geophires_logging_config=True):
averages = np.average(Results, 0)
means = np.nanmean(Results, 0)
std = np.nanstd(Results, 0)
var = np.nanvar(Results, 0)

# write them out
with open(Outputfile, "a") as f:
i=0
if Iterations != actual_records_count:
f.write(os.linesep + os.linesep + str(actual_records_count) + " iterations finished successfully and were used to calculate the statistics" + os.linesep + os.linesep)
for output in Outputs:
f.write (output + ":" + os.linesep)
f.write (f" minimum: {mins[i]:,.2f}" + os.linesep)
f.write (f" maximum: {maxs[i]:,.2f}" + os.linesep)
f.write (f" median: {medians[i]:,.2f}" + os.linesep)
f.write (f" average: {averages[i]:,.2f}" + os.linesep)
f.write (f" mean: {means[i]:,.2f}" + os.linesep)
f.write (f" standard deviation: {std[i]:,.2f}" + os.linesep)
f.write (f" variance: {var[i]:,.2f}" + os.linesep)
f.write(output + ":" + os.linesep)
f.write(f" minimum: {mins[i]:,.2f}" + os.linesep)
f.write(f" maximum: {maxs[i]:,.2f}" + os.linesep)
f.write(f" median: {medians[i]:,.2f}" + os.linesep)
f.write(f" average: {averages[i]:,.2f}" + os.linesep)
f.write(f" mean: {means[i]:,.2f}" + os.linesep)
f.write(f" standard deviation: {std[i]:,.2f}" + os.linesep)
i = i + 1

print (" Calculation Time: "+"{0:10.3f}".format((time.time()-tic)) + " sec" + os.linesep)
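The main change in MC_GeoPHIRES3.py above replaces the per-iteration multiprocessing.Process objects with concurrent.futures.ProcessPoolExecutor, mapping WorkPackage over Iterations copies of the same argument list. The sketch below illustrates that pattern in isolation; work_package and the job list stand in for the real WorkPackage and pass_list, and the iteration count is hypothetical. Converting the job list to a tuple, as the diff does, is not required, since executor.map() accepts any iterable.

import concurrent.futures

def work_package(pass_list):
    # stand-in for the real WorkPackage: run one Monte Carlo iteration
    inputs, outputs = pass_list[0], pass_list[1]
    return len(inputs) + len(outputs)

if __name__ == '__main__':
    iterations = 4  # hypothetical; the real value comes from the MC settings file
    job = [['Ambient Temperature'], ['Average Net Electricity Production']]  # stands in for pass_list
    jobs = [job] * iterations
    # map() submits one call per element; each call runs in its own worker process
    with concurrent.futures.ProcessPoolExecutor() as executor:
        results = list(executor.map(work_package, jobs))
    print(results)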
5 changes: 3 additions & 2 deletions tests/examples/MC_GEOPHIRES_Settings_file.txt
@@ -5,5 +5,6 @@ INPUT, Ambient Temperature,triangular, 15, 20, 25
OUTPUT, Average Net Electricity Production
OUTPUT, Average Production Temperature
OUTPUT, Average Annual Total Electricity Generation
ITERATIONS, 20
MC_OUTPUT_FILE, D:\Work\python-geophires-x\tests\MC_GEOPHIRES_Result.txt
ITERATIONS, 250
MC_OUTPUT_FILE, MC_GEOPHIRES_Result.txt
PYTHON_PATH, D:\Work\python-geophires-x-nrel\venv\Scripts\python.exe
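The settings file above drives the Monte Carlo run: INPUT lines give a GEOPHIRES parameter name, a distribution, and that distribution's parameters; OUTPUT lines name the result fields to harvest from each run; ITERATIONS, MC_OUTPUT_FILE, and PYTHON_PATH set the number of runs, the results file, and the interpreter used to launch the code. The real parsing happens in MC_GeoPHIRES3.main() and is not shown in this diff, so the reader below is only an illustrative sketch of the comma-separated format, assuming the first field of each line is the keyword.

# Illustrative reader for the settings format shown above (not the actual parser).
inputs, outputs, settings = [], [], {}
with open('MC_GEOPHIRES_Settings_file.txt') as f:
    for line in f:
        parts = [p.strip() for p in line.split(',')]
        if not parts or not parts[0]:
            continue
        if parts[0] == 'INPUT':
            inputs.append(parts[1:])       # name, distribution, distribution parameters
        elif parts[0] == 'OUTPUT':
            outputs.append(parts[1])       # result field to collect from each run
        else:
            settings[parts[0]] = parts[1]  # ITERATIONS, MC_OUTPUT_FILE, PYTHON_PATH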
