Use number of epochs and learning rate as training hyperparameters
rahmans1 authored Jan 12, 2024 · 1 parent 1a5d06e · commit 7398a0b
Showing 1 changed file with 2 additions and 2 deletions: benchmarks/roman_pots/train_dense_neural_network.py
@@ -45,11 +45,11 @@ def standardize(x):
     standardized_tensor = (x - mean) / std
     return standardized_tensor, mean, std
 
-def train_model(input_tensor, target_tensor, model):
+def train_model(input_tensor, target_tensor, model, num_epochs, learning_rate):
 
     # Define the loss function and optimizer
     criterion = torch.nn.HuberLoss(reduction='mean', delta=1.0)
-    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
+    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
 
     # Create a learning rate scheduler
     scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,'min',patience=100,cooldown=100,factor=0.5,threshold=1e-4,verbose=True)
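For context, a minimal sketch of what the updated function plausibly looks like after this commit. Only the signature, loss, optimizer, and scheduler lines appear in the hunk; the epoch loop, the return value, and the call-site values below are assumptions added for illustration, not taken from the repository.

import torch
from torch.optim import lr_scheduler

def train_model(input_tensor, target_tensor, model, num_epochs, learning_rate):

    # Loss and optimizer, as shown in the hunk; lr now comes from the new parameter
    criterion = torch.nn.HuberLoss(reduction='mean', delta=1.0)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # Learning rate scheduler, as shown in the hunk
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=100,
                                               cooldown=100, factor=0.5,
                                               threshold=1e-4, verbose=True)

    # Assumed training loop: it lies outside the hunk, but the new num_epochs
    # parameter presumably bounds it, replacing a previously hard-coded count
    for epoch in range(num_epochs):
        optimizer.zero_grad()
        prediction = model(input_tensor)
        loss = criterion(prediction, target_tensor)
        loss.backward()
        optimizer.step()
        scheduler.step(loss)  # ReduceLROnPlateau steps on the monitored metric

    return model

# Hypothetical call site; the values are illustrative only
# trained = train_model(X_train, y_train, model, num_epochs=1000, learning_rate=0.01)

Passing num_epochs and learning_rate in as arguments lets the benchmark sweep these hyperparameters from the caller instead of editing the training function.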
