[REVIEW] Fix Padding Related Bugs: Crossfit (#66)
* Add crossfit bits
* Add padding fixes
* Fix test
* Add docstrings
* Fix torch import
* Fix padding to only pad the last dim
* Fix padding tests
* Add test for left/right
* Skip test for cf_loader
* Fix bugs in clipping
* Add early stopping to HF memory estimation
* Fix copyright year
* Add copyright year
* Address last of Ryan's reviews
* Skip loading model if it's already fitted
* Use self.load_cfg instead of AutoConfig.from_pretrained
* Fix memory_curve_utils and skip loading cfg/tokenizer here

Signed-off-by: Vibhu Jawa <[email protected]>
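One of the headline fixes above is padding only the last dimension of token tensors, on either the left or the right. As a rough illustration of that behavior (a minimal sketch, not the actual crossfit implementation; `pad_last_dim` is a hypothetical helper), `torch.nn.functional.pad` takes its pad widths for the last dimension first, so a two-element tuple touches only that dimension:

```python
import torch
import torch.nn.functional as F


def pad_last_dim(t: torch.Tensor, target_len: int, side: str = "right", value: int = 0) -> torch.Tensor:
    """Hypothetical helper: pad only the last dim of `t` out to `target_len`."""
    pad_amount = target_len - t.shape[-1]
    if pad_amount <= 0:
        return t
    # F.pad's pad tuple applies to the last dimension first: (left, right).
    pad = (pad_amount, 0) if side == "left" else (0, pad_amount)
    return F.pad(t, pad, value=value)


ids = torch.tensor([[101, 2023, 102]])
print(pad_last_dim(ids, 5))               # tensor([[ 101, 2023,  102,    0,    0]])
print(pad_last_dim(ids, 5, side="left"))  # tensor([[   0,    0,  101, 2023,  102]])
```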
Showing 10 changed files with 615 additions and 98 deletions.
@@ -0,0 +1,89 @@
# Copyright 2024 NVIDIA CORPORATION
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
from typing import Optional

import joblib
import numpy as np
import torch
from sklearn.linear_model import LinearRegression
from tqdm import tqdm
from transformers import PreTrainedModel

from crossfit.utils.model_adapter import adapt_model_input


def fit_memory_estimate_curve(
    model: PreTrainedModel,
    path_or_name: str,
    start_batch_size: int = 1,
    end_batch_size: int = 2048,
    batch_size_increment: int = 256,
    start_seq_len: int = 1,
    end_seq_len: int = 2048,
    seq_len_increment: int = 64,
    mem_model_path: Optional[str] = None,
) -> LinearRegression:
    print(f"Fitting memory estimate curve for model: {path_or_name}")

    device = next(model.parameters()).device
    X: list[list[int]] = []
    y: list[float] = []

    batch_size_pbar = tqdm(
        range(start_batch_size, end_batch_size + 1, batch_size_increment), desc="Batch size"
    )
    for batch_size in batch_size_pbar:
        seq_len_pbar = tqdm(
            range(start_seq_len, end_seq_len + 1, seq_len_increment),
            desc="Sequence length",
            leave=False,
        )
        for seq_len in seq_len_pbar:
            torch.cuda.reset_peak_memory_stats()

            batch = {
                "input_ids": torch.randint(1, 501, (batch_size, seq_len)).to(device=device),
                "attention_mask": torch.ones((batch_size, seq_len)).to(device=device),
            }

            try:
                outputs = adapt_model_input(model, batch)
                memory_used = torch.cuda.max_memory_allocated() / (1024**2)  # Convert to MB
                # Features: batch size, sequence length, and seq_len**2 to
                # capture the quadratic memory growth of attention.
                X.append([batch_size, seq_len, seq_len**2])
                y.append(memory_used)

            except RuntimeError as e:
                if "out of memory" in str(e) or "out_of_memory" in str(e):
                    # Early stopping for this batch size: longer sequences
                    # can only use more memory.
                    seq_len_pbar.close()
                    break
                else:
                    raise
            finally:
                del batch
                if "outputs" in vars():
                    del outputs
                gc.collect()
                torch.cuda.empty_cache()

        # Check if we've hit the memory limit at the very first sequence
        # length; larger batch sizes cannot fit either, so stop entirely.
        if seq_len == start_seq_len:
            batch_size_pbar.close()
            break

    mem_model = LinearRegression().fit(np.array(X), np.array(y))
    if mem_model_path is not None:
        joblib.dump(mem_model, mem_model_path)

    return mem_model
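For context, a hedged usage sketch of the function above, assuming the file is importable from the caller's environment; the model name, sweep ranges, and output path are placeholders, and `AutoModel` is the standard Hugging Face loader:

```python
import numpy as np
from transformers import AutoModel

# Placeholder model; any Hugging Face model that accepts
# input_ids/attention_mask works here.
model = AutoModel.from_pretrained("bert-base-uncased").to("cuda")

mem_model = fit_memory_estimate_curve(
    model=model,
    path_or_name="bert-base-uncased",
    end_batch_size=64,  # small ranges keep the sweep quick
    end_seq_len=512,
    mem_model_path="mem_model.joblib",
)

# Predict peak memory (in MB) for a candidate batch shape, using the same
# [batch_size, seq_len, seq_len**2] feature layout the curve was fit with.
batch_size, seq_len = 32, 256
pred_mb = mem_model.predict(np.array([[batch_size, seq_len, seq_len**2]]))[0]
print(f"Estimated peak memory: {pred_mb:.0f} MB")
```

The quadratic `seq_len**2` feature lets a plain linear regression track attention's non-linear memory growth, which is why the fit stays a cheap `LinearRegression` rather than anything heavier.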