
Commit

Update peft_sam.py
anwai98 authored Nov 13, 2024
1 parent 98e9fcf commit adeda24
Showing 1 changed file with 2 additions and 2 deletions.
4 changes: 2 additions & 2 deletions micro_sam/models/peft_sam.py
@@ -23,11 +23,11 @@ class LoRASurgery(nn.Module):
         rank: The rank of the decomposition matrices for updating weights in each attention layer.
         block: The chosen attention blocks for implementing lora.
     """
-    def __init__(self, rank: int, block: nn.Module, alpha: float = 1):
+    def __init__(self, rank: int, block: nn.Module):
         super().__init__()
         self.qkv_proj = block.attn.qkv
         self.dim = self.qkv_proj.in_features
-        self.alpha = alpha
+        self.alpha = 1  # From our experiments, 'alpha' as 1 gives the best performance.
         self.rank = rank
 
         self.w_a_linear_q = nn.Linear(self.dim, self.rank, bias=False)
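
For context: in LoRA, a frozen projection W is augmented with a trainable low-rank update scaled by alpha, i.e. W'x = Wx + alpha * B(A(x)), so fixing alpha = 1 adds the learned update unscaled. The sketch below shows where alpha enters the forward pass. It is a minimal, self-contained illustration; the names LoRALinear, w_a, and w_b are hypothetical and not taken from micro_sam.

import torch
import torch.nn as nn

class LoRALinear(nn.Module):
    """A frozen linear layer plus a scaled low-rank (LoRA) update -- illustrative sketch."""

    def __init__(self, base: nn.Linear, rank: int, alpha: float = 1.0):
        super().__init__()
        self.base = base                       # pretrained projection, kept frozen
        self.base.weight.requires_grad = False
        self.alpha = alpha                     # scales the low-rank contribution
        self.w_a = nn.Linear(base.in_features, rank, bias=False)
        self.w_b = nn.Linear(rank, base.out_features, bias=False)
        nn.init.zeros_(self.w_b.weight)        # update starts at zero, so W' == W initially

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # With alpha fixed to 1, as in this commit, the update is added unscaled.
        return self.base(x) + self.alpha * self.w_b(self.w_a(x))

# Usage: wrap an existing projection and train only the adapter weights.
proj = nn.Linear(768, 768)
lora = LoRALinear(proj, rank=4, alpha=1.0)
out = lora(torch.randn(2, 16, 768))
print(out.shape)  # torch.Size([2, 16, 768])

Note that some LoRA implementations scale the update by alpha / rank rather than by alpha alone; the comment in the diff suggests the direct scaling shown here, with alpha fixed at 1 based on the maintainers' experiments.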
