From 781bdafcf887119fbb0152474f694de627e4ac57 Mon Sep 17 00:00:00 2001
From: Benjamin Bossan
Date: Wed, 27 Nov 2024 11:56:39 +0100
Subject: [PATCH] FIX Small regression in BNB LoRA output

Our regression tests reveal that the 8bit LoRA BNB regression test is
failing. To reproduce, run:

pytest tests/regression/test_regression.py -s --regression -k test_lora_8bit

The regression was introduced in #2122. We didn't notice this earlier
because of other failing tests in the nightly CI.

The cause of the error is subtle. In the original code, we would
calculate the LoRA output, convert the dtype if necessary, then add it
to the base output. After the mentioned PR, we calculate the LoRA
output, add it to the base output, then convert the dtype if necessary.
The difference is very small on a per-layer basis, but it can
accumulate over the layers, leading to a significant difference in
outputs, as witnessed by the regression test.

This PR rolls back this specific part of the PR (both for 8bit and
4bit) while leaving the main change of that PR intact.
---
 src/peft/tuners/lora/bnb.py | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/src/peft/tuners/lora/bnb.py b/src/peft/tuners/lora/bnb.py
index 7f51b0ba54..c3f18a63c5 100644
--- a/src/peft/tuners/lora/bnb.py
+++ b/src/peft/tuners/lora/bnb.py
@@ -235,7 +235,7 @@ def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
                         x = x.to(compute_dtype)
 
                 if not self.use_dora[active_adapter]:
-                    result = result + lora_B(lora_A(dropout(x))) * scaling
+                    output = lora_B(lora_A(dropout(x))) * scaling
                 else:
                     if isinstance(dropout, torch.nn.Identity) or not self.training:
                         base_result = result
@@ -243,7 +243,7 @@ def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
                         x = dropout(x)
                         base_result = None
 
-                    result = result + self.lora_magnitude_vector[active_adapter](
+                    output = self.lora_magnitude_vector[active_adapter](
                         x,
                         lora_A=lora_A,
                         lora_B=lora_B,
@@ -252,7 +252,8 @@ def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
                         base_result=base_result,
                     )
                 if requires_conversion:
-                    result = result.to(expected_dtype)
+                    output = output.to(expected_dtype)
+                result = result + output
 
         return result
 
@@ -490,7 +491,7 @@ def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
                     x = x.to(lora_A.weight.dtype)
 
                 if not self.use_dora[active_adapter]:
-                    result = result + lora_B(lora_A(dropout(x))) * scaling
+                    output = lora_B(lora_A(dropout(x))) * scaling
                 else:
                     if isinstance(dropout, torch.nn.Identity) or not self.training:
                         base_result = result
@@ -498,7 +499,7 @@ def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
                         x = dropout(x)
                         base_result = None
 
-                    result = result + self.lora_magnitude_vector[active_adapter](
+                    output = self.lora_magnitude_vector[active_adapter](
                         x,
                         lora_A=lora_A,
                         lora_B=lora_B,
@@ -507,7 +508,8 @@ def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
                         base_result=base_result,
                     )
                 if requires_conversion:
-                    result = result.to(expected_dtype)
+                    output = output.to(expected_dtype)
+                result = result + output
 
         return result
 
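
Note (not part of the patch): the following minimal sketch illustrates why the
order of the dtype conversion and the addition matters. It assumes a float16
base output and a float32 LoRA delta, which is the typical situation for the
bnb layers touched here; the shapes and the 1e-3 scale are made up for
illustration.

import torch

torch.manual_seed(0)

# Stand-ins for the tensors in forward(): `result` is the base layer output
# (float16 as a plausible expected_dtype), `output` is the LoRA delta
# computed in the compute dtype (float32).
result = torch.randn(4, 16, dtype=torch.float16)
output = torch.randn(4, 16, dtype=torch.float32) * 1e-3

# Order restored by this patch: convert the LoRA delta to expected_dtype
# first, then add. The sum is computed in float16.
restored = result + output.to(result.dtype)

# Order introduced in #2122: add first (type promotion performs the sum in
# float32), convert afterwards. Rounding happens at a different point.
regressed = (result + output).to(result.dtype)

# The per-layer discrepancy is at most a rounding step per element, but it
# can compound across the layers of a deep model.
print((restored - regressed).abs().max())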