From d3d701a7ebb30a93b8a72123b5222b2ec95a8f9c Mon Sep 17 00:00:00 2001 From: Jaylenne Date: Fri, 1 Jul 2022 17:26:08 +0800 Subject: [PATCH] LSHsim: make the channel width of the last conv layers configurable via --ch-last --- LSHsim.py | 10 ++++++---- configs/lshconfig.config | 1 + 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/LSHsim.py b/LSHsim.py index e018a29..70a6d62 100644 --- a/LSHsim.py +++ b/LSHsim.py @@ -47,10 +47,12 @@ parser.add_argument('--update', action='store_true', help='Do the binary memory update') parser.add_argument('--sum-argmax', action='store_true', help='Do the sum argmax') parser.add_argument('--real-eval', action='store_true', help='Do the real value model evaluation') +parser.add_argument('--ch-last', default=128, type=int, + help='Channel number of the last convolution layers in CNN, to match the parameter count') class Net(nn.Module): - def __init__(self, input_shape, keydim=128): + def __init__(self, input_shape, keydim=128, ch_last=args.ch_last): super(Net, self).__init__() # Constants kernel = 3 @@ -62,10 +64,10 @@ def __init__(self, input_shape, keydim=128): self.conv2 = nn.Conv2d(64, 64, kernel, padding=(0, 0)) self.conv3 = nn.Conv2d(64, 128, kernel, padding=(pad, pad)) self.conv4 = nn.Conv2d(128, 128, kernel, padding=(pad, pad)) - self.conv5 = nn.Conv2d(128, 256, kernel, padding=(pad, pad)) - self.conv6 = nn.Conv2d(256, 256, kernel, padding=(pad, pad)) + self.conv5 = nn.Conv2d(128, ch_last, kernel, padding=(pad, pad)) + self.conv6 = nn.Conv2d(ch_last, ch_last, kernel, padding=(pad, pad)) self.pool = nn.MaxPool2d(2, 2) - self.fc1 = nn.Linear(2304, keydim) + self.fc1 = nn.Linear(9 * ch_last, keydim) self.dropout = nn.Dropout(p) def forward(self, x): diff --git a/configs/lshconfig.config b/configs/lshconfig.config index 3c799c4..5980cd8 100644 --- a/configs/lshconfig.config +++ b/configs/lshconfig.config @@ -1,5 +1,6 @@ save-dir ./results/32dim/model_best.pth.tar key-dim 32 +ch-last 256 lshdim [8, 16, 32, 64, 128, 256, 512, 1024, 2048] eval-way 5 eval-shot 1