Commit 3258ff9
use pytest.mark directly (huggingface#27390)
fix

Co-authored-by: ydshieh <[email protected]>
ydshieh authored Nov 9, 2023
1 parent 791ec37 commit 3258ff9
Showing 5 changed files with 14 additions and 14 deletions.
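The change is mechanical and identical across all five files: the `from pytest import mark` import is replaced by a plain `import pytest`, and the custom `flash_attn_test` mark is applied through the fully qualified `pytest.mark` namespace. A minimal sketch of the resulting pattern (the file names, test body, and mark description below are illustrative, not taken from the repository):

# conftest.py -- register the custom mark so pytest does not emit
# PytestUnknownMarkWarning for it (transformers registers its marks in
# its own conftest.py; the description string here is made up).
def pytest_configure(config):
    config.addinivalue_line("markers", "flash_attn_test: requires Flash Attention 2")

# test_example.py -- the pattern this commit standardizes on: import
# pytest itself and qualify the mark as pytest.mark.<name>.
import pytest

@pytest.mark.flash_attn_test
def test_flash_attn_smoke():
    assert True

Tests carrying the mark can then be selected or excluded from the command line, e.g. `pytest -m flash_attn_test` or `pytest -m "not flash_attn_test"`.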
tests/models/bark/test_modeling_bark.py (6 changes: 3 additions & 3 deletions)

@@ -20,7 +20,7 @@
 import tempfile
 import unittest

-from pytest import mark
+import pytest

 from transformers import (
     BarkCoarseConfig,
@@ -877,7 +877,7 @@ def test_resize_embeddings_untied(self):

     @require_flash_attn
     @require_torch_gpu
-    @mark.flash_attn_test
+    @pytest.mark.flash_attn_test
     @slow
     def test_flash_attn_2_inference(self):
         for model_class in self.all_model_classes:
@@ -936,7 +936,7 @@ def test_flash_attn_2_inference(self):

     @require_flash_attn
     @require_torch_gpu
-    @mark.flash_attn_test
+    @pytest.mark.flash_attn_test
     @slow
     def test_flash_attn_2_inference_padding_right(self):
         for model_class in self.all_model_classes:
tests/models/distilbert/test_modeling_distilbert.py (6 changes: 3 additions & 3 deletions)

@@ -16,7 +16,7 @@
 import tempfile
 import unittest

-from pytest import mark
+import pytest

 from transformers import DistilBertConfig, is_torch_available
 from transformers.testing_utils import require_flash_attn, require_torch, require_torch_accelerator, slow, torch_device
@@ -290,7 +290,7 @@ def test_torchscript_device_change(self):
     # Because DistilBertForMultipleChoice requires inputs with different shapes we need to override this test.
     @require_flash_attn
     @require_torch_accelerator
-    @mark.flash_attn_test
+    @pytest.mark.flash_attn_test
     @slow
     def test_flash_attn_2_inference(self):
         import torch
@@ -344,7 +344,7 @@ def test_flash_attn_2_inference(self):
     # Because DistilBertForMultipleChoice requires inputs with different shapes we need to override this test.
     @require_flash_attn
     @require_torch_accelerator
-    @mark.flash_attn_test
+    @pytest.mark.flash_attn_test
     @slow
     def test_flash_attn_2_inference_padding_right(self):
         import torch
tests/models/llama/test_modeling_llama.py (4 changes: 2 additions & 2 deletions)

@@ -17,8 +17,8 @@

 import unittest

+import pytest
 from parameterized import parameterized
-from pytest import mark

 from transformers import LlamaConfig, is_torch_available, set_seed
 from transformers.testing_utils import (
@@ -385,7 +385,7 @@ def test_model_rope_scaling(self, scaling_type):

     @require_flash_attn
     @require_torch_gpu
-    @mark.flash_attn_test
+    @pytest.mark.flash_attn_test
     @slow
     def test_flash_attn_2_generate_padding_right(self):
         """
tests/models/mistral/test_modeling_mistral.py (6 changes: 3 additions & 3 deletions)

@@ -19,7 +19,7 @@
 import tempfile
 import unittest

-from pytest import mark
+import pytest

 from transformers import AutoTokenizer, MistralConfig, is_torch_available
 from transformers.testing_utils import (
@@ -369,7 +369,7 @@ def test_past_key_values_format(self):

     @require_flash_attn
     @require_torch_gpu
-    @mark.flash_attn_test
+    @pytest.mark.flash_attn_test
     @slow
     def test_flash_attn_2_generate_padding_right(self):
         import torch
@@ -403,7 +403,7 @@ def test_flash_attn_2_generate_padding_right(self):

     @require_flash_attn
     @require_torch_gpu
-    @mark.flash_attn_test
+    @pytest.mark.flash_attn_test
     @slow
     def test_flash_attn_2_inference_padding_right(self):
         import torch
tests/models/whisper/test_modeling_whisper.py (6 changes: 3 additions & 3 deletions)

@@ -21,7 +21,7 @@
 import unittest

 import numpy as np
-from pytest import mark
+import pytest

 import transformers
 from transformers import WhisperConfig
@@ -800,7 +800,7 @@ def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_

     @require_flash_attn
     @require_torch_gpu
-    @mark.flash_attn_test
+    @pytest.mark.flash_attn_test
     @slow
     def test_flash_attn_2_inference(self):
         import torch
@@ -845,7 +845,7 @@ def test_flash_attn_2_inference(self):

     @require_flash_attn
     @require_torch_gpu
-    @mark.flash_attn_test
+    @pytest.mark.flash_attn_test
     @slow
     def test_flash_attn_2_inference_padding_right(self):
         import torch
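All of the touched tests are additionally gated by `@slow` and by hardware decorators such as `@require_torch_gpu`; in transformers, `@slow` tests are skipped unless the RUN_SLOW=1 environment variable is set. A typical invocation for just the flash-attention tests in one of these files would therefore look something like this (hypothetical command line, adjust the path as needed):

RUN_SLOW=1 pytest -m flash_attn_test tests/models/bark/test_modeling_bark.py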
