diff --git a/algorithmic_efficiency/workloads/fastmri/fastmri_pytorch/models.py b/algorithmic_efficiency/workloads/fastmri/fastmri_pytorch/models.py
index c35d68979..e0f012ea1 100644
--- a/algorithmic_efficiency/workloads/fastmri/fastmri_pytorch/models.py
+++ b/algorithmic_efficiency/workloads/fastmri/fastmri_pytorch/models.py
@@ -130,7 +130,7 @@ def __init__(self,
         nn.Dropout2d(dropout_rate),
         nn.Conv2d(out_chans, out_chans, kernel_size=3, padding=1, bias=False),
         norm_layer,
-        nn.LeakyReLU(negative_slope=0.2, inplace=True),
+        activation_fn,
         nn.Dropout2d(dropout_rate),
     )
@@ -156,7 +156,6 @@ def __init__(self,
     norm_layer = nn.InstanceNorm2d(out_chans)
     if use_tanh:
       activation_fn = nn.Tanh(inplace=True)
-      activation_fn = nn.LeakyReLU(negative_slope=0.2, inplace=True)
     else:
       activation_fn = nn.LeakyReLU(negative_slope=0.2, inplace=True)
     self.layers = nn.Sequential(
diff --git a/algorithmic_efficiency/workloads/fastmri/workload.py b/algorithmic_efficiency/workloads/fastmri/workload.py
index 0b815eb1b..dde888777 100644
--- a/algorithmic_efficiency/workloads/fastmri/workload.py
+++ b/algorithmic_efficiency/workloads/fastmri/workload.py
@@ -13,6 +13,8 @@ class BaseFastMRIWorkload(spec.Workload):
   def target_metric_name(self) -> str:
     """The name of the target metric (useful for scoring/processing code)."""
     return 'ssim'
+
+  @property
   def use_layer_norm(self) -> bool:
     """Whether or not to use LayerNorm in the model."""
     return False
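
The models.py hunks fix two related bugs: the second activation in the block was hard-coded to LeakyReLU instead of using the selected activation_fn, and a stray assignment immediately overwrote the Tanh branch with LeakyReLU, so use_tanh previously had no effect. Below is a minimal, self-contained sketch of the patched block, not the module as it appears in the repo: the class name ConvBlock and the first conv layer are assumed to mirror the pattern visible in the hunk, and any other constructor arguments are omitted. Note also that torch.nn.Tanh accepts no inplace argument, so the nn.Tanh(inplace=True) left as a context line above would raise a TypeError at construction time; the sketch constructs it as nn.Tanh().

import torch
from torch import nn


class ConvBlock(nn.Module):
  """Two 3x3 convs, each followed by InstanceNorm, activation, and dropout.

  Simplified sketch of the patched module; constructor signature is assumed.
  """

  def __init__(self,
               in_chans: int,
               out_chans: int,
               dropout_rate: float,
               use_tanh: bool = False) -> None:
    super().__init__()
    norm_layer = nn.InstanceNorm2d(out_chans)
    if use_tanh:
      activation_fn = nn.Tanh()  # nn.Tanh takes no inplace argument
    else:
      activation_fn = nn.LeakyReLU(negative_slope=0.2, inplace=True)
    self.layers = nn.Sequential(
        nn.Conv2d(in_chans, out_chans, kernel_size=3, padding=1, bias=False),
        norm_layer,
        activation_fn,
        nn.Dropout2d(dropout_rate),
        nn.Conv2d(out_chans, out_chans, kernel_size=3, padding=1, bias=False),
        norm_layer,
        activation_fn,  # previously hard-coded LeakyReLU; now respects use_tanh
        nn.Dropout2d(dropout_rate),
    )

  def forward(self, x: torch.Tensor) -> torch.Tensor:
    return self.layers(x)


if __name__ == '__main__':
  block = ConvBlock(in_chans=1, out_chans=32, dropout_rate=0.0, use_tanh=True)
  out = block(torch.randn(2, 1, 64, 64))
  print(out.shape)  # torch.Size([2, 32, 64, 64])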
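
The workload.py hunk matters because without @property, accessing workload.use_layer_norm yields the bound method itself rather than False, and a bound method is always truthy, so any `if workload.use_layer_norm:` check would behave as if the flag were set. A hypothetical minimal reproduction:

class Workload:

  def use_layer_norm(self) -> bool:  # missing @property, as before the patch
    return False


w = Workload()
print(bool(w.use_layer_norm))    # True: the method object is truthy
print(bool(w.use_layer_norm()))  # False: the actual flag value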