From adeda24054d2e602be4e4b6dd9a19cdb74084601 Mon Sep 17 00:00:00 2001
From: Anwai Archit <52396323+anwai98@users.noreply.github.com>
Date: Wed, 13 Nov 2024 16:11:46 +0100
Subject: [PATCH] Update peft_sam.py

---
 micro_sam/models/peft_sam.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/micro_sam/models/peft_sam.py b/micro_sam/models/peft_sam.py
index 9b37ceeb..c1b5dcc6 100644
--- a/micro_sam/models/peft_sam.py
+++ b/micro_sam/models/peft_sam.py
@@ -23,11 +23,11 @@ class LoRASurgery(nn.Module):
         rank: The rank of the decomposition matrices for updating weights in each attention layer.
         block: The chosen attention blocks for implementing lora.
     """
-    def __init__(self, rank: int, block: nn.Module, alpha: float = 1):
+    def __init__(self, rank: int, block: nn.Module):
         super().__init__()
         self.qkv_proj = block.attn.qkv
         self.dim = self.qkv_proj.in_features
-        self.alpha = alpha
+        self.alpha = 1  # From our experiments, 'alpha' as 1 gives the best performance.
         self.rank = rank
 
         self.w_a_linear_q = nn.Linear(self.dim, self.rank, bias=False)
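
Context for the change: in the standard LoRA formulation, alpha scales the low-rank update W_b W_a x that is added to the output of the frozen projection, so hardcoding alpha = 1 means the update is added unscaled. The Python sketch below is a minimal, hypothetical illustration of that scaling and is not the exact LoRASurgery implementation in micro_sam; the forward pass, the q/v slicing, the zero initialization of the B matrices, and the w_*_linear_v names are assumptions rather than part of this patch.

import torch
import torch.nn as nn


class LoRAQKVSketch(nn.Module):
    """Hypothetical sketch of LoRA on a frozen qkv projection.

    The low-rank update is scaled by ``alpha`` before being added to the
    query and value outputs. Not the exact micro_sam implementation.
    """

    def __init__(self, qkv_proj: nn.Linear, rank: int):
        super().__init__()
        self.qkv_proj = qkv_proj                  # frozen W_qkv of the attention block
        self.dim = qkv_proj.in_features
        self.alpha = 1                            # fixed scaling, as in the patch
        self.rank = rank
        # Low-rank factors A (dim -> rank) and B (rank -> dim) for q and v.
        self.w_a_linear_q = nn.Linear(self.dim, rank, bias=False)
        self.w_b_linear_q = nn.Linear(rank, self.dim, bias=False)
        self.w_a_linear_v = nn.Linear(self.dim, rank, bias=False)
        self.w_b_linear_v = nn.Linear(rank, self.dim, bias=False)
        # Initializing the B matrices to zero makes the initial update a no-op (assumption).
        nn.init.zeros_(self.w_b_linear_q.weight)
        nn.init.zeros_(self.w_b_linear_v.weight)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        qkv = self.qkv_proj(x)                    # shape (..., 3 * dim)
        q, k, v = qkv.chunk(3, dim=-1)
        # Add the alpha-scaled low-rank updates to the query and value parts only.
        q = q + self.alpha * self.w_b_linear_q(self.w_a_linear_q(x))
        v = v + self.alpha * self.w_b_linear_v(self.w_a_linear_v(x))
        return torch.cat([q, k, v], dim=-1)

With alpha fixed at 1, the remaining knob for the size of the adaptation is the rank of the decomposition, which is what the commit message and the inline comment point to.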