From 7914b195c0a41ed01823ceac469b58407d9de021 Mon Sep 17 00:00:00 2001
From: Michael Wyatt
Date: Thu, 14 Dec 2023 16:12:46 -0800
Subject: [PATCH 1/7] fix for tests using torch<2.1 (#4818)

Our torch 1.10 tests have been failing since the merge of #4569, which
added a `device_type` kwarg to the `torch.random.fork_rng` call. That
kwarg is not compatible with older versions of torch: it was only
introduced in https://github.com/pytorch/pytorch/pull/98069.

Fixes #4644, #4503
---
 tests/unit/alexnet_model.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/tests/unit/alexnet_model.py b/tests/unit/alexnet_model.py
index e3be2be4894d..cf533063d6ec 100644
--- a/tests/unit/alexnet_model.py
+++ b/tests/unit/alexnet_model.py
@@ -11,6 +11,7 @@
 import deepspeed
 import deepspeed.comm as dist
 import deepspeed.runtime.utils as ds_utils
+from deepspeed.runtime.utils import required_torch_version
 from deepspeed.accelerator import get_accelerator
 from deepspeed.runtime.pipe.module import PipelineModule, LayerSpec
 
@@ -111,8 +112,11 @@ def cifar_trainset(fp16=False):
 
 
 def train_cifar(model, config, num_steps=400, average_dp_losses=True, fp16=True, seed=123):
-    with get_accelerator().random().fork_rng(devices=[get_accelerator().current_device_name()],
-                                             device_type=get_accelerator().device_name()):
+    if required_torch_version(min_version=2.1):
+        fork_kwargs = {"device_type": get_accelerator().device_name()}
+    else:
+        fork_kwargs = {}
+    with get_accelerator().random().fork_rng(devices=[get_accelerator().current_device_name()], **fork_kwargs):
         ds_utils.set_random_seed(seed)
 
         # disable dropout

From 8998707a2fc8584712a4cb3dc465d02e7d9f50da Mon Sep 17 00:00:00 2001
From: Sam Ade Jacobs
Date: Fri, 15 Dec 2023 13:22:39 -0500
Subject: [PATCH 2/7] Universal Checkpoint for Sequence Parallelism (#4752)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This PR extends the [universal checkpoint](https://github.com/microsoft/Megatron-DeepSpeed/tree/main/examples_deepspeed/universal_checkpointing)
to support DS sequence parallelism and training scenarios where pipeline
parallelism is not enabled. The attached TensorBoard chart shows a training
scenario (validation curve) where a GPT model is pre-trained with data
parallelism (4 GPUs), and checkpoints are saved at the 100th and 200th
iterations. The checkpoint at the 100th iteration is later loaded for
continual pre-training with a different configuration (more GPU resources:
data parallelism = 4 GPUs, sequence parallelism = 2 GPUs).
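As a rough sketch of what this enables, a checkpoint saved without pipeline
parallelism (that is, a folder containing no `layer_*` files) can now be
loaded for conversion. The checkpoint path below is hypothetical; the class
and attributes are the ones touched by the diff:

```python
# Minimal sketch: inspect a DeepSpeed checkpoint saved WITHOUT pipeline
# parallelism. The folder path is hypothetical; parallel degrees are
# inferred from the checkpoint files when not passed explicitly.
from deepspeed.checkpoint import DeepSpeedCheckpoint

ds_checkpoint = DeepSpeedCheckpoint('output/checkpoints/global_step100')

# With this change, _validate_folder() only requires layer_* files when
# pipeline parallelism was actually used, so this no longer asserts.
print(f'pp_degree={ds_checkpoint.pp_degree}')
print(f'tp_degree={ds_checkpoint.tp_degree}')
print(f'dp_degree={ds_checkpoint.dp_degree}')
```

`ds_to_universal.py` (updated below) drives this class when producing the
universal checkpoint.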
[Screenshot: TensorBoard validation curves, 2023-11-28]

---------

Co-authored-by: Michael Wyatt
---
 deepspeed/checkpoint/deepspeed_checkpoint.py | 36 +++++++++++++-------
 deepspeed/checkpoint/ds_to_universal.py      |  8 ++---
 deepspeed/checkpoint/reshape_3d_utils.py     |  2 +-
 deepspeed/checkpoint/universal_checkpoint.py |  2 +-
 deepspeed/runtime/zero/stage_1_and_2.py      |  4 ++-
 5 files changed, 33 insertions(+), 19 deletions(-)

diff --git a/deepspeed/checkpoint/deepspeed_checkpoint.py b/deepspeed/checkpoint/deepspeed_checkpoint.py
index 77634222d292..8312dddd2fa6 100644
--- a/deepspeed/checkpoint/deepspeed_checkpoint.py
+++ b/deepspeed/checkpoint/deepspeed_checkpoint.py
@@ -34,7 +34,10 @@ class DeepSpeedCheckpoint(object):
 
     def __init__(self, dir, tp_degree=None, pp_degree=None, dp_degree=None):
         self.dir = dir
-        self._validate_folder(dir)
+
+        pipeline_parallel = len(get_files_with_prefix(get_files(dir), LAYER_FILE_PREFIX)) > 0
+
+        self._validate_folder(dir, pipeline_parallel)
 
         self.zero_checkpoint = ZeROCheckpoint(dir)
 
@@ -193,7 +196,10 @@ def get_final_norm_files(self, tp_index: int) -> list:
         return self.tp_to_final_norm_map[tp_index]
 
     def _build_tp_other_layer_map(self, layer_index: int):
-        assert layer_index < len(self.layer_files)
+        data_map = {}
+        if len(self.layer_files) < 1:
+            return data_map
+        assert layer_index <= len(self.layer_files)
         layer_files = get_files_with_prefix(self.layer_files, self.layer_keys[layer_index])
         layer_file_partitions = partition_data(layer_files, self.tp_degree)
         data_map = {i: flist for i, flist in enumerate(layer_file_partitions)}
@@ -207,9 +213,13 @@ def get_2d_parallel_files(self, tp_index: int, pp_index: int) -> list:
 
     def _build_pp_transformer_map(self):
         data_map = {}
-        transformer_layers = self.layer_keys[1:-1]
-        layers_per_pp = len(transformer_layers) // self.pp_degree
-        data_map = {i: transformer_layers[i * layers_per_pp:(i + 1) * layers_per_pp] for i in range(0, self.pp_degree)}
+        if self.pp_degree > 0:
+            transformer_layers = self.layer_keys[1:-1]
+            layers_per_pp = len(transformer_layers) // self.pp_degree
+            data_map = {
+                i: transformer_layers[i * layers_per_pp:(i + 1) * layers_per_pp]
+                for i in range(0, self.pp_degree)
+            }
         return data_map
 
     def _dump_mapping(self, data_map, map_tag=None):
@@ -222,9 +232,9 @@ def _build_transformer_file_map(self):
         transformer_layer_keys = self.layer_keys[1:-1]
         file_map = {}
         # XXX: this is not guaranteed
-        layers_per_pp = len(transformer_layer_keys) // self.pp_degree
-        if layers_per_pp == 0:
-            layers_per_pp = 1
+        layers_per_pp = 1
+        if self.pp_degree > 0:
+            layers_per_pp = len(transformer_layer_keys) // self.pp_degree
         #print(f"{transformer_layer_keys} {layers_per_pp}")
         for key_index, layer_key in enumerate(transformer_layer_keys):
             pp_index = key_index // layers_per_pp
@@ -240,8 +250,8 @@ def _sanity_check(self):
         assert len(self.mp_rank_files) % self.tp_degree == 0
-        assert len(self.layer_keys) > 2
         assert self.zero_checkpoint.num_files % (self.pp_degree * self.tp_degree) == 0
+        assert self.zero_checkpoint.num_files % (self.tp_degree) == 0
 
         # XXX: fix me - isn't always the case
         # only true with --pp-partition-method 'type:transformer|embedding' \
         # assert (len(self.layer_keys) - 2) % self.pp_degree == 0
@@ -270,12 +280,14 @@ def _merge_state_dicts(self, sd_list):
 
         return merged_sd
 
-    def _validate_folder(self, dir):
+    def _validate_folder(self, dir, pipeline_parallel):
         basic_folder_validation(dir)
 
         file_list = get_files(dir)
-
-        for file_prefix in [MODEL_FILE_PREFIX, LAYER_FILE_PREFIX, f'{LAYER_FILE_PREFIX}01']:
+
+        file_prefix_list = [MODEL_FILE_PREFIX]
+        if pipeline_parallel:
+            file_prefix_list.extend([LAYER_FILE_PREFIX, f'{LAYER_FILE_PREFIX}01'])
+        for file_prefix in file_prefix_list:
             ckpt_files = get_files_with_prefix(file_list, file_prefix)
             assert len(
                 ckpt_files

diff --git a/deepspeed/checkpoint/ds_to_universal.py b/deepspeed/checkpoint/ds_to_universal.py
index 8be187aa89c2..f40c5630899d 100755
--- a/deepspeed/checkpoint/ds_to_universal.py
+++ b/deepspeed/checkpoint/ds_to_universal.py
@@ -15,7 +15,7 @@
 import shutil
 import torch
 import tqdm
-# from pprint import pprint
+#from pprint import pprint
 
 from deepspeed.checkpoint import DeepSpeedCheckpoint
 from deepspeed.checkpoint import (
@@ -241,9 +241,9 @@ def _extract_zero_shard_files(args, ds_checkpoint, temp_dir):
     _3d_range_list = list(
         itertools.product(range(ds_checkpoint.pp_degree), range(ds_checkpoint.tp_degree),
                           range(ds_checkpoint.dp_degree)))
-    # pprint(f'{_3d_range_list=}')
+    #pprint(f'{_3d_range_list=}')
     work_chunks = list(_get_chunks(_3d_range_list, args.num_extract_workers))
-    # pprint(f'{work_chunks=}')
+    #pprint(f'{work_chunks=}')
 
     # extract_zero_shards(temp_dir, ds_checkpoint, _3d_range_list[0])
     do_work = partial(extract_zero_shards, temp_dir, ds_checkpoint)
@@ -309,7 +309,7 @@ def main():
     print('*** 1. Extracting ZeRO fragments')
     _extract_zero_shard_files(args, ds_checkpoint, temp_dir)
 
-    print('*** 2. Merging slices')
+    print('*** 2. Merging slices .....')
     _merge_tp_slice_files(args, ds_checkpoint, slice_shapes, temp_dir)
 
     print('*** 3. Saving common optimizer states')

diff --git a/deepspeed/checkpoint/reshape_3d_utils.py b/deepspeed/checkpoint/reshape_3d_utils.py
index b5bf41e2d160..02b3947624a1 100644
--- a/deepspeed/checkpoint/reshape_3d_utils.py
+++ b/deepspeed/checkpoint/reshape_3d_utils.py
@@ -81,7 +81,7 @@ def get_model_3d_descriptor(dir):
     else:
         tp_degree = len(get_files_with_prefix(file_list, MODEL_FILE_PREFIX))
         dp_degree = max(1, len(zero_file_list) // tp_degree)
-        pp_degree = 0
+        pp_degree = 1
 
     return model_3d_desc(pp_degree, tp_degree, dp_degree)

diff --git a/deepspeed/checkpoint/universal_checkpoint.py b/deepspeed/checkpoint/universal_checkpoint.py
index 5849a834cdd3..542d1125c566 100644
--- a/deepspeed/checkpoint/universal_checkpoint.py
+++ b/deepspeed/checkpoint/universal_checkpoint.py
@@ -13,8 +13,8 @@ def load_hp_checkpoint_state(self, folder, tp_rank, tp_world_size):
     hp_mapping = self._hp_mapping
     optim_state_keys = hp_mapping.get_optim_state_keys()
     hp_keys = [FP32_WEIGHT_KEY] + optim_state_keys
+    #print(f'{hp_keys=}')
     checkpoint_files = {key: os.path.join(folder, f"{key}.pt") for key in hp_keys}
-
     for file in checkpoint_files.values():
         assert os.path.isfile(file), f'{file} is not a valid file'

diff --git a/deepspeed/runtime/zero/stage_1_and_2.py b/deepspeed/runtime/zero/stage_1_and_2.py
index 1d2d561dbd39..aeb533698af3 100755
--- a/deepspeed/runtime/zero/stage_1_and_2.py
+++ b/deepspeed/runtime/zero/stage_1_and_2.py
@@ -2258,7 +2258,9 @@ def _load_hp_checkpoint_state(self, checkpoint_dir):
         self._load_global_state(optim_sd)
 
         tp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu)
-        tp_world_size = self.mpu.get_slice_parallel_world_size()
+        tp_world_size = self.mpu.get_slice_parallel_world_size() if hasattr(self.mpu, "get_slice_parallel_world_size") \
+            else self.mpu.get_tensor_model_parallel_world_size()
+
         for i, _ in enumerate(self.optimizer.param_groups):
             for lp in self.bit16_groups[i]:
                 if lp._hp_mapping is not None:

From 84eaf5ac843234737f0b49e36a818d1aabd1776f Mon Sep 17 00:00:00 2001
From: Michael Wyatt
Date: Fri, 15 Dec 2023 10:23:22 -0800
Subject: [PATCH 3/7] Accelerate CI fix (#4819)

---
 .github/workflows/nv-accelerate-v100.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/nv-accelerate-v100.yml b/.github/workflows/nv-accelerate-v100.yml
index d8a03ff34f78..31e413124aed 100644
--- a/.github/workflows/nv-accelerate-v100.yml
+++ b/.github/workflows/nv-accelerate-v100.yml
@@ -18,7 +18,7 @@ concurrency:
 
 jobs:
   unit-tests:
-    runs-on: [self-hosted, nvidia, cu111, v100]
+    runs-on: [self-hosted, nvidia, cu116, v100]
 
     steps:
       - uses: actions/checkout@v3
@@ -28,7 +28,7 @@ jobs:
 
       - name: Install pytorch
         run: |
-          pip install -U --cache-dir $TORCH_CACHE torch torchvision --extra-index-url https://download.pytorch.org/whl/cu111
+          pip install -U --cache-dir $TORCH_CACHE torch --index-url https://download.pytorch.org/whl/cu118
           python -c "import torch; print('torch:', torch.__version__, torch)"
           python -c "import torch; print('CUDA available:', torch.cuda.is_available())"

From 4a6e0c06240b45185709ac4a2902ec42518049d2 Mon Sep 17 00:00:00 2001
From: jxysoft
Date: Sat, 16 Dec 2023 05:01:12 +0800
Subject: [PATCH 4/7] fix [BUG] 'DeepSpeedGPTInference' object has no
 attribute 'dtype' for… (#4814)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 deepspeed/model_implementations/transformers/ds_transformer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deepspeed/model_implementations/transformers/ds_transformer.py b/deepspeed/model_implementations/transformers/ds_transformer.py
index a41df58ad059..d87d0de997b5 100644
--- a/deepspeed/model_implementations/transformers/ds_transformer.py
+++ b/deepspeed/model_implementations/transformers/ds_transformer.py
@@ -163,7 +163,7 @@ def forward(
 
         if (self.config.dtype in [torch.float16, torch.bfloat16, torch.int8]) \
                 and input.dtype == torch.float:
-            target_dtype = torch.half if self.dtype == torch.int8 else self.dtype
+            target_dtype = torch.half if self.config.dtype == torch.int8 else self.config.dtype
             input = input.to(target_dtype)
 
         with torch.no_grad():

From d1f1d45f4b4eb86bf5b82ed617f09f528bb00d11 Mon Sep 17 00:00:00 2001
From: Michael Wyatt
Date: Fri, 15 Dec 2023 13:02:17 -0800
Subject: [PATCH 5/7] Update broken link in docs (#4822)

resolves #4821
---
 docs/_tutorials/getting-started.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/_tutorials/getting-started.md b/docs/_tutorials/getting-started.md
index 8d2bbf2d9964..f9a4cfdc68b4 100644
--- a/docs/_tutorials/getting-started.md
+++ b/docs/_tutorials/getting-started.md
@@ -8,7 +8,7 @@ tags: getting-started
 ## Installation
 
 * Installing is as simple as `pip install deepspeed`, [see more details](/tutorials/advanced-install/).
-* To get started with DeepSpeed on AzureML, please see the [AzureML Examples GitHub](https://github.com/Azure/azureml-examples/tree/main/python-sdk/workflows/train/deepspeed)
+* To get started with DeepSpeed on AzureML, please see the [AzureML Examples GitHub](https://github.com/Azure/azureml-examples/tree/main/cli/jobs/deepspeed)
 * DeepSpeed has direct integrations with [HuggingFace Transformers](https://github.com/huggingface/transformers) and [PyTorch Lightning](https://github.com/PyTorchLightning/pytorch-lightning). HuggingFace Transformers users can now easily accelerate their models with DeepSpeed through a simple ``--deepspeed`` flag + config file [See more details](https://huggingface.co/docs/transformers/main_classes/deepspeed). PyTorch Lightning provides easy access to DeepSpeed through the Lightning Trainer [See more details](https://pytorch-lightning.readthedocs.io/en/stable/advanced/multi_gpu.html?highlight=deepspeed#deepspeed).
 * DeepSpeed on AMD can be used via our [ROCm images](https://hub.docker.com/r/deepspeed/rocm501/tags), e.g., `docker pull deepspeed/rocm501:ds060_pytorch110`.

From b83b1c2e1c4dc4c91c4ad78773dc2232ca9f7070 Mon Sep 17 00:00:00 2001
From: Logan Adams <114770087+loadams@users.noreply.github.com>
Date: Fri, 15 Dec 2023 14:12:50 -0800
Subject: [PATCH 6/7] Update imports from Transformers (#4817)

---
 requirements/requirements-dev.txt                            | 2 +-
 requirements/requirements-inf.txt                            | 2 +-
 tests/unit/inference/quantization/test_intX_quantization.py | 4 ++--
 tests/unit/runtime/zero/test_zero_nesting_init.py           | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/requirements/requirements-dev.txt b/requirements/requirements-dev.txt
index 078386c457bd..7204eead5864 100644
--- a/requirements/requirements-dev.txt
+++ b/requirements/requirements-dev.txt
@@ -16,5 +16,5 @@ sphinx
 sphinx-rtd-theme
 tensorboard
 torchvision
-transformers
+transformers>=4.32.1
 wandb

diff --git a/requirements/requirements-inf.txt b/requirements/requirements-inf.txt
index 848a7f7a485d..27371e623f26 100644
--- a/requirements/requirements-inf.txt
+++ b/requirements/requirements-inf.txt
@@ -1,5 +1,5 @@
 google
 lm-eval==0.3.0
 protobuf
-transformers
+transformers>=4.32.1
 transformers[sentencepiece]

diff --git a/tests/unit/inference/quantization/test_intX_quantization.py b/tests/unit/inference/quantization/test_intX_quantization.py
index 56df2b232d15..fd6a8e5ad2e1 100644
--- a/tests/unit/inference/quantization/test_intX_quantization.py
+++ b/tests/unit/inference/quantization/test_intX_quantization.py
@@ -55,7 +55,7 @@ def quantization_test_helper(pre_quant_type: torch.dtype, num_bits: int):
 
 def zero3_post_init_quantization_test_helper(cpu_offload: bool, nvme_offload: bool, bits: int):
     import deepspeed
-    from transformers.deepspeed import HfDeepSpeedConfig
+    from transformers.integrations.deepspeed import HfDeepSpeedConfig
 
     def get_zero3_ds_config(hf_config: OPTConfig, cpu_offload: bool, nvme_offload: bool, bits: int) -> Dict:
         GB = 1 << 30
@@ -172,7 +172,7 @@ def get_zero3_ds_config(hf_config: OPTConfig, cpu_offload: bool, nvme_offload: b
 
 def zero3_quantized_initialization_test_helper(cpu_offload: bool, nvme_offload: bool, bits: int):
     import deepspeed
-    from transformers.deepspeed import HfDeepSpeedConfig
+    from transformers.integrations.deepspeed import HfDeepSpeedConfig
 
     def get_zero3_ds_config(hf_config: OPTConfig, cpu_offload: bool, nvme_offload: bool, bits: int) -> Dict:
         GB = 1 << 30

diff --git a/tests/unit/runtime/zero/test_zero_nesting_init.py b/tests/unit/runtime/zero/test_zero_nesting_init.py
index 143e7e997b13..15d82fd8be00 100644
--- a/tests/unit/runtime/zero/test_zero_nesting_init.py
+++ b/tests/unit/runtime/zero/test_zero_nesting_init.py
@@ -8,7 +8,7 @@
 from unit.common import DistributedTest
 
 from transformers import VisionEncoderDecoderModel
-from transformers.deepspeed import HfDeepSpeedConfig
+from transformers.integrations.deepspeed import HfDeepSpeedConfig
 
 import deepspeed

From bc1b5a6c06049f39d1e5c18bbe0f29a09e11f4a3 Mon Sep 17 00:00:00 2001
From: Michael Wyatt
Date: Fri, 15 Dec 2023 14:39:25 -0800
Subject: [PATCH 7/7] Minor updates to CI workflows (#4823)

---
 .github/workflows/amd-mi100.yml  | 56 ------------------------------
 .github/workflows/auto-sync.yml  | 59 --------------------------------
 .github/workflows/formatting.yml |  2 +-
 .github/workflows/python.yml     |  2 +-
 4 files changed, 2 insertions(+), 117 deletions(-)
 delete mode 100644 .github/workflows/amd-mi100.yml
 delete mode 100644 .github/workflows/auto-sync.yml

diff --git a/.github/workflows/amd-mi100.yml b/.github/workflows/amd-mi100.yml
deleted file mode 100644
index 7ad0f4178db4..000000000000
--- a/.github/workflows/amd-mi100.yml
+++ /dev/null
@@ -1,56 +0,0 @@
-name: amd-mi100
-
-on:
-  schedule:
-    - cron: "0 0 * * *"
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.ref }}
-  cancel-in-progress: true
-
-jobs:
-  amd-tests:
-    # The type of runner that the job will run on
-    runs-on: [self-hosted, amd, mi100]
-
-    # Steps represent a sequence of tasks that will be executed as part of the job
-    steps:
-      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-      - uses: actions/checkout@v3
-
-      - id: setup-venv
-        uses: ./.github/workflows/setup-venv
-
-      - name: Install pytorch
-        run: |
-          pip install --cache-dir $TORCH_CACHE torch==1.13.1 torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.1.1
-          python -c "import torch; print('torch:', torch.__version__, torch)"
-          python -c "import torch; print('CUDA available:', torch.cuda.is_available())"
-
-      - name: Install transformers
-        run: |
-          git clone https://github.com/huggingface/transformers
-          cd transformers
-          # if needed switch to the last known good SHA until transformers@master is fixed
-          # git checkout 1cc453d33
-          git rev-parse --short HEAD
-          pip install .
-
-      # Runs a set of commands using the runners shell
-      - name: Install deepspeed
-        run: |
-          pip install .[dev,1bit,autotuning]
-          #python -c "from deepspeed.env_report import cli_main; cli_main()"
-          ds_report
-
-      - name: Python environment
-        run: |
-          pip list
-
-      # Runs a set of commands using the runners shell
-      - name: Unit tests
-        run: |
-          unset TORCH_CUDA_ARCH_LIST # only jit compile for current arch
-          cd tests
-          pytest $PYTEST_OPTS -n 4 --verbose unit/
-          pytest $PYTEST_OPTS -m 'sequential' unit/

diff --git a/.github/workflows/auto-sync.yml b/.github/workflows/auto-sync.yml
deleted file mode 100644
index bfbf5a2ae37a..000000000000
--- a/.github/workflows/auto-sync.yml
+++ /dev/null
@@ -1,59 +0,0 @@
-name: AutoSync
-
-on:
-  push:
-    branches:
-      - 'master'
-
-jobs:
-
-  Create-PR:
-    runs-on: ubuntu-20.04
-
-    steps:
-      - uses: actions/checkout@v3
-        with:
-          token: ${{ secrets.GHP_TOKEN }}
-          repository: ${{ secrets.DST_REPO }}
-          ref: ${{ secrets.DST_REPO_BRANCH }}
-          path: dst-repo
-
-      - name: Get PR data
-        run: |
-          echo "REPO=${{ github.repository }}" >> $GITHUB_ENV
-          echo "COMMIT_SHA=${{ github.event.after }}" >> $GITHUB_ENV
-          echo "SHORT_SHA=$(echo ${{ github.event.after }} | cut -c1-8)" >> $GITHUB_ENV
-          echo "USERNAME=${{ github.event.head_commit.author.username }}" >> $GITHUB_ENV
-          echo "USER_EMAIL=${{ github.event.head_commit.author.username }}@users.noreply.github.com" >> $GITHUB_ENV
-          echo "PR_NAME=$(echo '${{ github.event.head_commit.message }}' | head -1 | sed 's|#|${{ github.repository }}#|g')" >> $GITHUB_ENV
-
-      - name: Cherry pick commit
-        continue-on-error: true
-        run: |
-          cd dst-repo
-          git config --global user.name ${{ env.USERNAME }}
-          git config --global user.email ${{ env.USER_EMAIL }}
-          git fetch https://github.com/${{ env.REPO }}.git master
-          git cherry-pick FETCH_HEAD --strategy-option octopus
-
-      - name: Add modified files
-        run: |
-          cd dst-repo
-          git add .
-
-      - name: Create Pull Request
-        uses: peter-evans/create-pull-request@v4
-        with:
-          path: dst-repo
-          token: ${{ secrets.GHP_TOKEN }}
-          body: |
-            **Auto-generated PR**
-            Repo - [${{ env.REPO }}](https://github.com/${{ env.REPO }})
-            PR name - ${{ env.PR_NAME }}
-            Commit - ${{ env.REPO }}@${{ env.COMMIT_SHA }}
-            Author - @${{ env.USERNAME }}
-          branch: AutoPR/${{ env.SHORT_SHA }}
-          assignees: ${{ env.USERNAME }}
-          title: ${{ env.PR_NAME }}
-          labels: AutoPR
-          author: ${{ env.USERNAME }} <${{ env.USER_EMAIL }}>

diff --git a/.github/workflows/formatting.yml b/.github/workflows/formatting.yml
index a168af277fb8..26f3819dd2bf 100644
--- a/.github/workflows/formatting.yml
+++ b/.github/workflows/formatting.yml
@@ -16,7 +16,7 @@ concurrency:
 
 jobs:
   # formatting and basic install on cpu-only machine
-  formatting:
+  unit-tests:
     runs-on: ubuntu-20.04
 
     steps:

diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml
index 279bad471c01..59770a5e23b3 100644
--- a/.github/workflows/python.yml
+++ b/.github/workflows/python.yml
@@ -17,7 +17,7 @@
   cancel-in-progress: true
 
 jobs:
-  version-check:
+  unit-tests:
     strategy:
       matrix:
         pyVersion: ["3.6", "3.7", "3.8", "3.9", "3.10"]