From 2de142712d2dc8892d216dfca365dc3ba2707c43 Mon Sep 17 00:00:00 2001
From: XiaoYang
Date: Thu, 1 Aug 2024 03:05:08 +0800
Subject: [PATCH] Fix ROPE extension issue and device mismatch (#840)

* When an exception has been assigned using an `as` target, it is cleared
  at the end of the `except` clause
  (https://docs.python.org/3/reference/compound_stmts.html#the-try-statement)

* Update loader.py

* Round up when extending the RoPE cache size

* inv_freq.device changed; make sure `t` and `inv_freq` are on the same device

---------

Co-authored-by: xiaoyang
Co-authored-by: Daniel Han
---
 unsloth/models/llama.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py
index b5244ed4..e6c9280b 100644
--- a/unsloth/models/llama.py
+++ b/unsloth/models/llama.py
@@ -14,6 +14,7 @@
 
 import torch
 import gc
+import math
 from typing import Optional, Tuple, List, Union
 from ._utils import *
 from ._utils import __version__
@@ -1036,7 +1037,7 @@ def forward(self, x, position_ids=None, seq_len=None):
 
     def extend_rope_embedding(self, x, seq_len):
         if seq_len <= self.current_rope_size: return # Iteratively grow by increments of 8192
-        self.current_rope_size = int(round(seq_len / 8192)) * 8192
+        self.current_rope_size = math.ceil(seq_len / 8192) * 8192
         self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype)
     pass
 pass
@@ -1109,7 +1110,7 @@ def _set_cos_sin_cache(self, seq_len, device, dtype):
         # in FP32. They are applied (multiplied) in FP32 as well.
         self.current_rope_size = seq_len
 
-        t = torch.arange(self.current_rope_size, device="cpu", dtype=torch.int64).float()
+        t = torch.arange(self.current_rope_size, device=self.inv_freq.device, dtype=torch.int64).float()
 
         freqs = torch.outer(t, self.inv_freq)
         # Different from paper, but it uses a different permutation in order to obtain the same calculation
@@ -1158,7 +1159,7 @@ def forward(self, x, position_ids=None, seq_len=None):
 
     def extend_rope_embedding(self, x, seq_len):
         if seq_len <= self.current_rope_size: return # Iteratively grow by increments of 8192
-        self.current_rope_size = int(round(seq_len / 8192)) * 8192
+        self.current_rope_size = math.ceil(seq_len / 8192) * 8192
         self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype)
     pass
 pass
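
For context on the rounding fix: `int(round(seq_len / 8192))` rounds to the
nearest multiple of 8192, so the RoPE cache could end up shorter than the
requested sequence length (e.g. seq_len = 12000 rounds down to 8192, and
seq_len = 4096 rounds to 0, since Python's round() rounds the half-way case
0.5 to the even value 0). `math.ceil` always rounds up to the next multiple.
A minimal standalone sketch, not part of the patch, illustrating the
difference:

    import math

    def old_rope_size(seq_len):
        # Rounds to the nearest multiple of 8192; can round DOWN below seq_len.
        return int(round(seq_len / 8192)) * 8192

    def new_rope_size(seq_len):
        # Rounds up to the next multiple of 8192; always >= seq_len.
        return math.ceil(seq_len / 8192) * 8192

    for seq_len in (4096, 8192, 12000, 16384):
        print(seq_len, old_rope_size(seq_len), new_rope_size(seq_len))
    # 4096  -> old     0, new  8192  (old cache too small)
    # 8192  -> old  8192, new  8192
    # 12000 -> old  8192, new 16384  (old cache too small)
    # 16384 -> old 16384, new 16384

The device change is analogous: `t` must be created on the same device as
`self.inv_freq`, which may now live on the GPU rather than the CPU, so that
`torch.outer(t, self.inv_freq)` does not mix devices.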