Skip to content

Commit

Permalink
fix slow CI
Browse files · Browse the repository at this point in the history
  • Loading branch information
kashif committed Nov 26, 2024
1 parent 43df3a4 commit 4229c03
Showing 1 changed file with 10 additions and 6 deletions.
16 changes: 10 additions & 6 deletions tests/slow/test_sft_slow.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ def test_sft_trainer_transformers(self, model_name, packing):

model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

tokenizer.pad_token = tokenizer.eos_token if tokenizer.pad_token is None else tokenizer.pad_token
trainer = SFTTrainer(
model,
args=training_args,
Expand Down Expand Up @@ -138,6 +138,7 @@ def test_sft_trainer_peft(self, model_name, packing):

model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token if tokenizer.pad_token is None else tokenizer.pad_token

trainer = SFTTrainer(
model,
Expand Down Expand Up @@ -174,6 +175,7 @@ def test_sft_trainer_transformers_mp(self, model_name, packing):

model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token if tokenizer.pad_token is None else tokenizer.pad_token

trainer = SFTTrainer(
model,
Expand Down Expand Up @@ -209,7 +211,7 @@ def test_sft_trainer_transformers_mp_gc(self, model_name, packing, gradient_chec

model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

tokenizer.pad_token = tokenizer.eos_token if tokenizer.pad_token is None else tokenizer.pad_token
trainer = SFTTrainer(
model,
args=training_args,
Expand Down Expand Up @@ -245,7 +247,7 @@ def test_sft_trainer_transformers_mp_gc_peft(self, model_name, packing, gradient

model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

tokenizer.pad_token = tokenizer.eos_token if tokenizer.pad_token is None else tokenizer.pad_token
trainer = SFTTrainer(
model,
args=training_args,
Expand Down Expand Up @@ -288,6 +290,7 @@ def test_sft_trainer_transformers_mp_gc_device_map(

model = AutoModelForCausalLM.from_pretrained(model_name, device_map=device_map)
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token if tokenizer.pad_token is None else tokenizer.pad_token

trainer = SFTTrainer(
model,
Expand Down Expand Up @@ -327,7 +330,7 @@ def test_sft_trainer_transformers_mp_gc_peft_qlora(self, model_name, packing, gr

model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=quantization_config)
tokenizer = AutoTokenizer.from_pretrained(model_name)

tokenizer.pad_token = tokenizer.eos_token if tokenizer.pad_token is None else tokenizer.pad_token
trainer = SFTTrainer(
model,
args=training_args,
Expand Down Expand Up @@ -369,8 +372,9 @@ def test_sft_trainer_with_chat_format_qlora(self, model_name, packing):

model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=quantization_config)
tokenizer = AutoTokenizer.from_pretrained(model_name)

model, tokenizer = setup_chat_format(model, tokenizer)
tokenizer.pad_token = tokenizer.eos_token if tokenizer.pad_token is None else tokenizer.pad_token
if tokenizer.chat_template is None:
model, tokenizer = setup_chat_format(model, tokenizer)

trainer = SFTTrainer(
model,
Expand Down

0 comments on commit 4229c03

Please sign in to comment.