Add rule to train network and save models and artifacts.
rahmans1 committed Jan 24, 2024
1 parent ec5d176 commit d89bae8
Showing 2 changed files with 134 additions and 36 deletions.
114 changes: 99 additions & 15 deletions benchmarks/roman_pots/Snakefile
@@ -6,24 +6,23 @@ DETECTOR_VERSION = os.environ["DETECTOR_VERSION"]
SUBSYSTEM = "roman_pots"
BENCHMARK = "dense_neural_network"
DETECTOR_CONFIG = ["epic_ip6"]
-NUM_TRAINING_INPUTS = [10,100]
-NUM_EPOCHS_PZ = [1000,10000]
+NUM_EPOCHS_PZ = [100]
LEARNING_RATE_PZ = [0.01]
SIZE_INPUT_PZ = [4]
SIZE_OUTPUT_PZ = [1]
N_LAYERS_PZ = [3,6]
SIZE_FIRST_HIDDEN_LAYER_PZ = [128]
MULTIPLIER_PZ = [0.5]
LEAK_RATE_PZ = [0.025]
-NUM_EPOCHS_PY = [1000,10000]
+NUM_EPOCHS_PY = [100]
LEARNING_RATE_PY = [0.01]
SIZE_INPUT_PY = [3]
SIZE_OUTPUT_PY = [1]
N_LAYERS_PY = [3,6]
SIZE_FIRST_HIDDEN_LAYER_PY = [128]
MULTIPLIER_PY = [0.5]
LEAK_RATE_PY = [0.025]
-NUM_EPOCHS_PX = [1000,10000]
+NUM_EPOCHS_PX = [100]
LEARNING_RATE_PX = [0.01]
SIZE_INPUT_PX = [3]
SIZE_OUTPUT_PX = [1]
@@ -32,18 +31,19 @@ SIZE_FIRST_HIDDEN_LAYER_PX = [128]
MULTIPLIER_PX = [0.5]
LEAK_RATE_PX = [0.025]
MAX_HASH = 6
+NFILES = range(1,11)
+NEVENTS_PER_FILE = [100]
+NUM_TRAINING_INPUTS = [int(0.5*max(NFILES)),int(0.7*max(NFILES))]
MODEL_VERSION = [
hashlib.sha512("_".join(map(str,x)).encode()).hexdigest()[:MAX_HASH]
for x in product(
-NUM_TRAINING_INPUTS,
+NEVENTS_PER_FILE, NUM_TRAINING_INPUTS,
NUM_EPOCHS_PZ, LEARNING_RATE_PZ, SIZE_INPUT_PZ, SIZE_OUTPUT_PZ, N_LAYERS_PZ, SIZE_FIRST_HIDDEN_LAYER_PZ, MULTIPLIER_PZ, LEAK_RATE_PZ,
NUM_EPOCHS_PY, LEARNING_RATE_PY, SIZE_INPUT_PY, SIZE_OUTPUT_PY, N_LAYERS_PY, SIZE_FIRST_HIDDEN_LAYER_PY, MULTIPLIER_PY, LEAK_RATE_PY,
NUM_EPOCHS_PX, LEARNING_RATE_PX, SIZE_INPUT_PX, SIZE_OUTPUT_PX, N_LAYERS_PX, SIZE_FIRST_HIDDEN_LAYER_PX, MULTIPLIER_PX, LEAK_RATE_PX
)
]
INPUT_STEERING_FILE = "steering_file.py"
-NFILES = range(1,11)
-NEVENTS_PER_FILE = 100

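MODEL_VERSION assigns each hyperparameter combination a short, deterministic identifier: the combination is joined with underscores, SHA-512 hashed, and truncated to MAX_HASH characters. A minimal sketch of the scheme, with a made-up combination (values illustrative):

import hashlib

MAX_HASH = 6
combo = (100, 5, 100, 0.01)   # e.g. (nevents_per_file, num_training_inputs, num_epochs_pz, learning_rate_pz, ...)
tag = "_".join(map(str, combo))                                      # "100_5_100_0.01"
model_version = hashlib.sha512(tag.encode()).hexdigest()[:MAX_HASH]  # stable 6-character hex tag
print(model_version)

The config-generation rule below recomputes the same digest from the same underscore-joined string, so the metadata file names line up with the MODEL_VERSION entries used in the rule targets.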
rule all:
input:
@@ -54,10 +54,29 @@ rule all:
detector_config=DETECTOR_CONFIG,
index=NFILES),
expand("results/"+str(DETECTOR_VERSION)+"/{detector_config}/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/metadata/"+str(DETECTOR_VERSION)+"_{detector_config}_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_{model_version}.txt",
detector_config=DETECTOR_CONFIG,
model_version=MODEL_VERSION),
expand("results/"+str(DETECTOR_VERSION)+"/{detector_config}/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/trained_models/model_pz_"+str(DETECTOR_VERSION)+"_{detector_config}_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_{model_version}.pt",
detector_config=DETECTOR_CONFIG,
model_version=MODEL_VERSION),
expand("results/"+str(DETECTOR_VERSION)+"/{detector_config}/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/trained_models/model_py_"+str(DETECTOR_VERSION)+"_{detector_config}_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_{model_version}.pt",
detector_config=DETECTOR_CONFIG,
model_version=MODEL_VERSION),
expand("results/"+str(DETECTOR_VERSION)+"/{detector_config}/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/trained_models/model_px_"+str(DETECTOR_VERSION)+"_{detector_config}_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_{model_version}.pt",
detector_config=DETECTOR_CONFIG,
model_version=MODEL_VERSION),
expand("results/"+str(DETECTOR_VERSION)+"/{detector_config}/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/artifacts/LossVsEpoch_model_pz_"+str(DETECTOR_VERSION)+"_{detector_config}_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_{model_version}.png",
detector_config=DETECTOR_CONFIG,
model_version=MODEL_VERSION),
expand("results/"+str(DETECTOR_VERSION)+"/{detector_config}/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/artifacts/LossVsEpoch_model_py_"+str(DETECTOR_VERSION)+"_{detector_config}_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_{model_version}.png",
detector_config=DETECTOR_CONFIG,
model_version=MODEL_VERSION),
expand("results/"+str(DETECTOR_VERSION)+"/{detector_config}/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/artifacts/LossVsEpoch_model_px_"+str(DETECTOR_VERSION)+"_{detector_config}_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_{model_version}.png",
detector_config=DETECTOR_CONFIG,
model_version=MODEL_VERSION)



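For orientation, each expand() call above amounts to a Cartesian product over the wildcard values. A sketch of one target list, assuming DETECTOR_VERSION is "24.01.0" and a single illustrative model hash:

from itertools import product

detector_configs = ["epic_ip6"]   # DETECTOR_CONFIG
model_versions = ["a1b2c3"]       # illustrative MODEL_VERSION entry
targets = [
    "results/24.01.0/"+dc+"/detector_benchmarks/roman_pots/dense_neural_network/"
    "trained_models/model_pz_24.01.0_"+dc+"_roman_pots_dense_neural_network_"+mv+".pt"
    for dc, mv in product(detector_configs, model_versions)
]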
rule roman_pots_generate_events:
input:
output:
@@ -82,30 +101,95 @@ rule preprocess_model_training_data:
for f_input in input.data:
os.system("root -q -b "+str(input.script)+"\"(\\\""+str(f_input)+"\\\",\\\""+str(f_input.replace(".edm4hep.root",".txt").replace("raw_data","processed_data"))+"\\\")\"")



rule roman_pots_generate_neural_network_configs:
input:
output:
expand("results/"+str(DETECTOR_VERSION)+"/{detector_config}/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/metadata/"+str(DETECTOR_VERSION)+"_{detector_config}_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_{model_version}.txt",
detector_config=DETECTOR_CONFIG,
model_version=MODEL_VERSION)
run:
-for detector_config, num_training_inputs, \
+for detector_config, nevents_per_file, num_training_inputs, \
num_epochs_pz, learning_rate_pz, size_input_pz, size_output_pz, n_layers_pz, size_first_hidden_layer_pz, multiplier_pz, leak_rate_pz, \
num_epochs_py, learning_rate_py, size_input_py, size_output_py, n_layers_py, size_first_hidden_layer_py, multiplier_py, leak_rate_py, \
num_epochs_px, learning_rate_px, size_input_px, size_output_px, n_layers_px, size_first_hidden_layer_px, multiplier_px, leak_rate_px in \
-product(DETECTOR_CONFIG, NUM_TRAINING_INPUTS,
+product(DETECTOR_CONFIG, NEVENTS_PER_FILE, NUM_TRAINING_INPUTS,
NUM_EPOCHS_PZ, LEARNING_RATE_PZ, SIZE_INPUT_PZ, SIZE_OUTPUT_PZ, N_LAYERS_PZ, SIZE_FIRST_HIDDEN_LAYER_PZ, MULTIPLIER_PZ, LEAK_RATE_PZ,
NUM_EPOCHS_PY, LEARNING_RATE_PY, SIZE_INPUT_PY, SIZE_OUTPUT_PY, N_LAYERS_PY, SIZE_FIRST_HIDDEN_LAYER_PY, MULTIPLIER_PY, LEAK_RATE_PY,
NUM_EPOCHS_PX, LEARNING_RATE_PX, SIZE_INPUT_PX, SIZE_OUTPUT_PX, N_LAYERS_PX, SIZE_FIRST_HIDDEN_LAYER_PX, MULTIPLIER_PX, LEAK_RATE_PX):
output_dir = "results/"+str(DETECTOR_VERSION)+"/"+str(detector_config)+"/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/metadata"
-output_file = str(num_training_inputs)+"_"+\
+output_file = str(nevents_per_file)+"_"+str(num_training_inputs)+"_"+\
str(num_epochs_pz)+"_"+str(learning_rate_pz)+"_"+str(size_input_pz)+"_"+str(size_output_pz)+"_"+str(n_layers_pz)+"_"+str(size_first_hidden_layer_pz)+"_"+str(multiplier_pz)+"_"+str(leak_rate_pz)+"_"+\
str(num_epochs_py)+"_"+str(learning_rate_py)+"_"+str(size_input_py)+"_"+str(size_output_py)+"_"+str(n_layers_py)+"_"+str(size_first_hidden_layer_py)+"_"+str(multiplier_py)+"_"+str(leak_rate_py)+"_"+\
str(num_epochs_px)+"_"+str(learning_rate_px)+"_"+str(size_input_px)+"_"+str(size_output_px)+"_"+str(n_layers_px)+"_"+str(size_first_hidden_layer_px)+"_"+str(multiplier_px)+"_"+str(leak_rate_px)
-output_file_location = open(str(output_dir)+"/"+str(DETECTOR_VERSION)+"_"+str(detector_config)+"_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_"+str(hashlib.sha512(output_file.encode()).hexdigest()[:MAX_HASH])+".txt","w")
-output_file_location.write(output_file)
+model_hash = hashlib.sha512(output_file.encode()).hexdigest()[:MAX_HASH]
+output_file_location = open(str(output_dir)+"/"+str(DETECTOR_VERSION)+"_"+str(detector_config)+"_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_"+str(model_hash)+".txt","w")
output_file_content = "--input_files\nresults/"+str(DETECTOR_VERSION)+"/"+str(detector_config)+"/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/processed_data/"+str(DETECTOR_VERSION)+"_"+str(detector_config)+"_\n"+\
"--model_version\n"+str(DETECTOR_VERSION)+"_"+str(detector_config)+"_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_"+str(model_hash)+"\n"+\
"--nevents_per_file\n"+str(nevents_per_file)+"\n"+\
"--num_training_inputs\n"+str(num_training_inputs)+"\n"+\
"--num_epochs_pz\n"+str(num_epochs_pz)+"\n"+\
"--learning_rate_pz\n"+str(learning_rate_pz)+"\n"+\
"--size_input_pz\n"+str(size_input_pz)+"\n"+\
"--size_output_pz\n"+str(size_output_pz)+"\n"+\
"--n_layers_pz\n"+str(n_layers_pz)+"\n"+\
"--size_first_hidden_layer_pz\n"+str(size_first_hidden_layer_pz)+"\n"+\
"--multiplier_pz\n"+str(multiplier_pz)+"\n"+\
"--leak_rate_pz\n"+str(leak_rate_pz)+"\n"+\
"--num_epochs_py\n"+str(num_epochs_py)+"\n"+\
"--learning_rate_py\n"+str(learning_rate_py)+"\n"+\
"--size_input_py\n"+str(size_input_py)+"\n"+\
"--size_output_py\n"+str(size_output_py)+"\n"+\
"--n_layers_py\n"+str(n_layers_py)+"\n"+\
"--size_first_hidden_layer_py\n"+str(size_first_hidden_layer_py)+"\n"+\
"--multiplier_py\n"+str(multiplier_py)+"\n"+\
"--leak_rate_py\n"+str(leak_rate_py)+"\n"+\
"--num_epochs_px\n"+str(num_epochs_px)+"\n"+\
"--learning_rate_px\n"+str(learning_rate_px)+"\n"+\
"--size_input_px\n"+str(size_input_px)+"\n"+\
"--size_output_px\n"+str(size_output_px)+"\n"+\
"--n_layers_px\n"+str(n_layers_px)+"\n"+\
"--size_first_hidden_layer_px\n"+str(size_first_hidden_layer_px)+"\n"+\
"--multiplier_px\n"+str(multiplier_px)+"\n"+\
"--leak_rate_px\n"+str(leak_rate_px)
output_file_location.write(output_file_content)
print(output_file_location)
output_file_location.close()

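The metadata file written above doubles as an argparse "response file": one token per line, alternating flag and value. train_dense_neural_network.py declares fromfile_prefix_chars='@', so prefixing a file name with @ expands its lines as command-line arguments. A minimal sketch (flag names taken from the file above, file name illustrative):

import argparse

parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
parser.add_argument('--num_epochs_pz')
parser.add_argument('--learning_rate_pz')

# metadata.txt contains:
#   --num_epochs_pz
#   100
#   --learning_rate_pz
#   0.01
args = parser.parse_args(['@metadata.txt'])       # each line becomes one argument
print(args.num_epochs_pz, args.learning_rate_pz)  # '100' '0.01' (strings; the script casts later)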

+rule roman_pots_train_neural_networks:
+input:
+script = "train_dense_neural_network.py"
+output:
+expand("results/"+str(DETECTOR_VERSION)+"/{detector_config}/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/trained_models/model_pz_"+str(DETECTOR_VERSION)+"_{detector_config}_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_{model_version}.pt",
+detector_config=DETECTOR_CONFIG,
+model_version=MODEL_VERSION),
+expand("results/"+str(DETECTOR_VERSION)+"/{detector_config}/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/trained_models/model_py_"+str(DETECTOR_VERSION)+"_{detector_config}_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_{model_version}.pt",
+detector_config=DETECTOR_CONFIG,
+model_version=MODEL_VERSION),
+expand("results/"+str(DETECTOR_VERSION)+"/{detector_config}/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/trained_models/model_px_"+str(DETECTOR_VERSION)+"_{detector_config}_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_{model_version}.pt",
+detector_config=DETECTOR_CONFIG,
+model_version=MODEL_VERSION),
+expand("results/"+str(DETECTOR_VERSION)+"/{detector_config}/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/artifacts/LossVsEpoch_model_pz_"+str(DETECTOR_VERSION)+"_{detector_config}_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_{model_version}.png",
+detector_config=DETECTOR_CONFIG,
+model_version=MODEL_VERSION),
+expand("results/"+str(DETECTOR_VERSION)+"/{detector_config}/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/artifacts/LossVsEpoch_model_py_"+str(DETECTOR_VERSION)+"_{detector_config}_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_{model_version}.png",
+detector_config=DETECTOR_CONFIG,
+model_version=MODEL_VERSION),
+expand("results/"+str(DETECTOR_VERSION)+"/{detector_config}/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/artifacts/LossVsEpoch_model_px_"+str(DETECTOR_VERSION)+"_{detector_config}_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_{model_version}.png",
+detector_config=DETECTOR_CONFIG,
+model_version=MODEL_VERSION)
+
+run:
+for detector_config, model_version in product(DETECTOR_CONFIG,MODEL_VERSION):
+os.system("mkdir -p results/"+str(DETECTOR_VERSION)+"/"+str(detector_config)+"/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/trained_models")
+os.system("python "+str(input.script)+" results/"+str(DETECTOR_VERSION)+"/"+str(detector_config)+"/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/metadata/"+str(DETECTOR_VERSION)+"_"+str(detector_config)+"_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_"+str(model_version)+".txt")
+os.system("mv model_pz_"+str(DETECTOR_VERSION)+"_"+str(detector_config)+"_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_"+str(model_version)+".pt results/"+str(DETECTOR_VERSION)+"/"+str(detector_config)+"/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/trained_models/")
+os.system("mv model_py_"+str(DETECTOR_VERSION)+"_"+str(detector_config)+"_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_"+str(model_version)+".pt results/"+str(DETECTOR_VERSION)+"/"+str(detector_config)+"/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/trained_models/")
+os.system("mv model_px_"+str(DETECTOR_VERSION)+"_"+str(detector_config)+"_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_"+str(model_version)+".pt results/"+str(DETECTOR_VERSION)+"/"+str(detector_config)+"/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/trained_models/")
+os.system("mv LossVsEpoch_model_pz_"+str(DETECTOR_VERSION)+"_"+str(detector_config)+"_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_"+str(model_version)+".png results/"+str(DETECTOR_VERSION)+"/"+str(detector_config)+"/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/artifacts/")
+os.system("mv LossVsEpoch_model_py_"+str(DETECTOR_VERSION)+"_"+str(detector_config)+"_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_"+str(model_version)+".png results/"+str(DETECTOR_VERSION)+"/"+str(detector_config)+"/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/artifacts/")
+os.system("mv LossVsEpoch_model_px_"+str(DETECTOR_VERSION)+"_"+str(detector_config)+"_"+str(SUBSYSTEM)+"_"+str(BENCHMARK)+"_"+str(model_version)+".png results/"+str(DETECTOR_VERSION)+"/"+str(detector_config)+"/detector_benchmarks/"+str(SUBSYSTEM)+"/"+str(BENCHMARK)+"/artifacts/")





56 changes: 35 additions & 21 deletions benchmarks/roman_pots/train_dense_neural_network.py
@@ -47,7 +47,24 @@ def standardize(x):
standardized_tensor = (x - mean) / std
return standardized_tensor, mean, std

-def train_model(input_tensor, target_tensor, model, num_epochs, learning_rate):
+def train_model(name, input_tensor, target_tensor, model, hyperparameters):
+# Set hyperparameters
+match name:
+case "model_pz":
+num_epochs = int(hyperparameters.num_epochs_pz)
+learning_rate = float(hyperparameters.learning_rate_pz)
+case "model_py":
+num_epochs = int(hyperparameters.num_epochs_py)
+learning_rate = float(hyperparameters.learning_rate_py)
+case "model_px":
+num_epochs = int(hyperparameters.num_epochs_px)
+learning_rate = float(hyperparameters.learning_rate_px)
+case _:
+print("Unrecognized model name. Returning without training.")
+return
+print("Set number of epochs and learning rate to "+str(num_epochs)+" and "+str(learning_rate)+" for "+str(name)+" training.")


# Send model to device
model=model.to(device)

@@ -84,28 +101,28 @@ def train_model(input_tensor, target_tensor, model, num_epochs, learning_rate):
print("Epoch "+str(epoch+1)+"/"+str(num_epochs)+", Loss: "+"{0:0.10f}".format(loss.item()))

# Plot the loss values
plt.figure()
plt.plot(range(1, num_epochs+1), losses)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Loss as a Function of Epoch')
-plt.savefig('Loss vs Epoch')
+plt.yscale('log')
+plt.savefig("LossVsEpoch_"+name+"_"+str(hyperparameters.model_version)+".png")

-return model
+torch.jit.script(model).save(name+"_"+str(hyperparameters.model_version)+".pt")
+return

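train_model now serializes each network as TorchScript instead of returning it. A minimal sketch of the save/load round trip (model and file name illustrative):

import torch

model = torch.nn.Linear(4, 1)                        # stand-in for the trained network
torch.jit.script(model).save("model_pz_example.pt")  # serialize as TorchScript

# The .pt artifact reloads without the original class definition,
# e.g. in a fresh Python process or from LibTorch in C++:
restored = torch.jit.load("model_pz_example.pt")
output = restored(torch.randn(2, 4))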
def run_experiment(hyperparameters):

-# Load input and target training data in tensors
-training_RP_pos = pd.DataFrame()
-training_MC_mom = pd.DataFrame()
+# Load training data in tensors
+training_data = pd.DataFrame()

for i in range(1,int(hyperparameters.num_training_inputs)+1):
-temp_training_RP_pos = pd.read_csv(hyperparameters.input_files+str(i)+'.txt', delimiter='\t', header=None)
-training_RP_pos = pd.concat([training_RP_pos, temp_training_RP_pos], ignore_index=True)
-temp_training_MC_mom = pd.read_csv(hyperparameters.target_files+str(i)+'.txt', delimiter='\t', header=None)
-training_MC_mom = pd.concat([training_MC_mom, temp_training_MC_mom], ignore_index=True)
+temp_training_data = pd.read_csv(hyperparameters.input_files+str(i)+'.txt', delimiter='\t', header=None)
+training_data = pd.concat([training_data, temp_training_data], ignore_index=True)

-training_RP_pos_tensor = torch.tensor(training_RP_pos.values, dtype=torch.float32)
-training_MC_mom_tensor = torch.tensor(training_MC_mom.values, dtype=torch.float32)
+training_RP_pos_tensor = torch.tensor(training_data.iloc[:,3:7].values, dtype=torch.float32)
+training_MC_mom_tensor = torch.tensor(training_data.iloc[:,0:3].values, dtype=torch.float32)

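The iloc slices encode an assumed column layout for the processed .txt files: the first three columns are the MC momentum targets and the next four are the roman-pots inputs (matching SIZE_INPUT_PZ = 4). A toy illustration with made-up numbers:

import pandas as pd
import torch

row = [[1.0, 0.1, 99.0, 0.01, 0.02, 0.03, 0.04]]  # [px, py, pz, four detector inputs]
df = pd.DataFrame(row)
inputs  = torch.tensor(df.iloc[:, 3:7].values, dtype=torch.float32)  # columns 3-6
targets = torch.tensor(df.iloc[:, 0:3].values, dtype=torch.float32)  # columns 0-2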
# Standardize training data
source_pz = training_RP_pos_tensor
@@ -141,19 +158,16 @@ def run_experiment(hyperparameters):
leak_rate=float(hyperparameters.leak_rate_px))

# Train models
-model_pz = train_model(scaled_source_pz, target_pz, initial_model_pz, num_epochs=int(hyperparameters.num_epochs_pz), learning_rate=float(hyperparameters.learning_rate_pz))
-model_py = train_model(scaled_source_py, target_py, initial_model_py, num_epochs=int(hyperparameters.num_epochs_py), learning_rate=float(hyperparameters.learning_rate_py))
-model_px = train_model(scaled_source_px, target_px, initial_model_px, num_epochs=int(hyperparameters.num_epochs_px), learning_rate=float(hyperparameters.learning_rate_px))
-
-# Save models
-torch.jit.script(model_pz).save('model_pz.pt')
-torch.jit.script(model_py).save('model_py.pt')
-torch.jit.script(model_px).save('model_px.pt')
+train_model("model_pz", scaled_source_pz, target_pz, initial_model_pz, hyperparameters)
+train_model("model_py", scaled_source_py, target_py, initial_model_py, hyperparameters)
+train_model("model_px", scaled_source_px, target_px, initial_model_px, hyperparameters)

+# Print end statement
+print("Training completed using "+str(int(hyperparameters.nevents_per_file)*int(hyperparameters.num_training_inputs))+" generated events.")

if __name__ == "__main__":
parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
-hyperparameters_list = ['--input_files', '--target_files', '--num_training_inputs',
+hyperparameters_list = ['--input_files', '--model_version', '--nevents_per_file', '--num_training_inputs',
'--num_epochs_pz', '--learning_rate_pz', '--size_input_pz', '--size_output_pz', '--n_layers_pz', '--size_first_hidden_layer_pz', '--multiplier_pz', '--leak_rate_pz',
'--num_epochs_py', '--learning_rate_py', '--size_input_py', '--size_output_py', '--n_layers_py', '--size_first_hidden_layer_py', '--multiplier_py', '--leak_rate_py',
'--num_epochs_px', '--learning_rate_px', '--size_input_px', '--size_output_px', '--n_layers_px', '--size_first_hidden_layer_px', '--multiplier_px', '--leak_rate_px']
