From bd55051887c1a505bb25301ef5bab13dd0f7a19d Mon Sep 17 00:00:00 2001 From: rhoadesScholar Date: Wed, 17 Apr 2024 15:23:23 -0400 Subject: [PATCH] =?UTF-8?q?feat:=20=E2=9C=A8=20Add=20logging=20properties.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Added model names and local learning rule strings to improve loggability of experiments. --- src/leibnetz/leibnet.py | 3 +++ src/leibnetz/nets/attentive_scalenet.py | 2 +- src/leibnetz/nets/bio.py | 15 ++++++++++++--- src/leibnetz/nets/scalenet.py | 2 +- src/leibnetz/nets/unet.py | 4 +++- src/leibnetz/nodes/node_ops.py | 1 + 6 files changed, 21 insertions(+), 6 deletions(-) diff --git a/src/leibnetz/leibnet.py b/src/leibnetz/leibnet.py index fee319d..f0fa324 100644 --- a/src/leibnetz/leibnet.py +++ b/src/leibnetz/leibnet.py @@ -21,6 +21,7 @@ def __init__( outputs: dict[str, Sequence[Tuple]], retain_buffer=True, initialization="kaiming", + name="LeibNet", ): super().__init__() full_node_list = [] @@ -81,6 +82,8 @@ def __init__( else: self.cpu() + self.name = name + def assemble(self, outputs: dict[str, Sequence[Tuple]]): """ NOTE: If your scales are non-integer realworld units, you need to treat the scale as integer factors instead. 
diff --git a/src/leibnetz/nets/attentive_scalenet.py b/src/leibnetz/nets/attentive_scalenet.py index 03fd71e..5f5bb01 100644 --- a/src/leibnetz/nets/attentive_scalenet.py +++ b/src/leibnetz/nets/attentive_scalenet.py @@ -166,7 +166,7 @@ def build_attentive_scale_net(subnet_dict_list: list[dict]): output = subnet_outputs.pop(f"{subnet_id}_output") outputs[f"{subnet_id}_output"] = output bottleneck_input_dict = subnet_outputs - network = LeibNet(nodes, outputs=outputs) + network = LeibNet(nodes, outputs=outputs, name="AttentiveScaleNet") return network diff --git a/src/leibnetz/nets/bio.py b/src/leibnetz/nets/bio.py index 21b39bf..891966d 100644 --- a/src/leibnetz/nets/bio.py +++ b/src/leibnetz/nets/bio.py @@ -27,6 +27,9 @@ def __init__(self): def init_layers(self, model): pass + def __str__(self): + return self.__class__.__name__ + @abstractmethod def update(self, x, w): pass @@ -38,6 +41,9 @@ def __init__(self, c=0.1): super().__init__() self.c = c + def __str__(self): + return f"HebbsRule(c={self.c})" + def update(self, inputs: torch.Tensor, weights: torch.Tensor): # TODO: Needs re-implementation d_ws = torch.zeros(inputs.size(0)) @@ -74,6 +80,9 @@ def __init__(self, precision=1e-30, delta=0.4, norm=2, k=2, normalize=False): self.k = k self.normalize = normalize + def __str__(self): + return f"KrotovsRule(precision={self.precision}, delta={self.delta}, norm={self.norm}, k={self.k})" + def init_layers(self, layer): if hasattr(layer, "weight"): layer.weight.data.normal_(mean=0.0, std=1.0) @@ -131,6 +140,9 @@ def __init__(self, c=0.1): super().__init__() self.c = c + def __str__(self): + return f"OjasRule(c={self.c})" + def update(self, inputs: torch.Tensor, weights: torch.Tensor): # TODO: needs re-implementation d_ws = torch.zeros(inputs.size(0), *weights.shape) @@ -242,6 +254,3 @@ def convert_to_backprop(model: LeibNet): ) return model - - -# %% diff --git a/src/leibnetz/nets/scalenet.py b/src/leibnetz/nets/scalenet.py index 71ce13f..76f18f1 100644 --- 
a/src/leibnetz/nets/scalenet.py +++ b/src/leibnetz/nets/scalenet.py @@ -147,7 +147,7 @@ def build_scale_net(subnet_dict_list: list[dict]): output = subnet_outputs.pop(f"{subnet_id}_output") outputs[f"{subnet_id}_output"] = output bottleneck_input_dict = subnet_outputs - network = LeibNet(nodes, outputs=outputs) + network = LeibNet(nodes, outputs=outputs, name="ScaleNet") return network diff --git a/src/leibnetz/nets/unet.py b/src/leibnetz/nets/unet.py index acba1a3..33c7240 100644 --- a/src/leibnetz/nets/unet.py +++ b/src/leibnetz/nets/unet.py @@ -114,7 +114,9 @@ def build_unet( # define network network = LeibNet( - nodes, outputs={"output": [tuple(np.ones(len(top_resolution))), top_resolution]} + nodes, + outputs={"output": [tuple(np.ones(len(top_resolution))), top_resolution]}, + name="UNet", ) return network diff --git a/src/leibnetz/nodes/node_ops.py b/src/leibnetz/nodes/node_ops.py index 60367fc..083ce9d 100644 --- a/src/leibnetz/nodes/node_ops.py +++ b/src/leibnetz/nodes/node_ops.py @@ -75,6 +75,7 @@ def __init__( layers = [] for i, kernel_size in enumerate(kernel_sizes): + # TODO: Use of BatchNorm does not work with bio-inspired learning rules if norm_layer is not None: layers.append(norm_layer(input_nc))