diff --git a/.github/workflows/nv-nightly.yml b/.github/workflows/nv-nightly.yml index e540b5acaf33..21d5eebc8fe0 100644 --- a/.github/workflows/nv-nightly.yml +++ b/.github/workflows/nv-nightly.yml @@ -38,10 +38,6 @@ jobs: git rev-parse --short HEAD pip install . - - name: Install datasets - run: | - pip install datasets - - name: Install deepspeed run: | pip install .[dev,1bit,autotuning,inf] diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 11a14256801a..e3f14ab3f7e5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -39,7 +39,7 @@ repos: name: check-torchdist entry: ./scripts/check-torchdist.py language: python - exclude: ^(deepspeed/comm/|docs/|benchmarks/|scripts/check-torchdist.py|deepspeed/moe/sharded_moe.py|deepspeed/runtime/comm/coalesced_collectives.py|deepspeed/elasticity/elastic_agent.py|deepspeed/launcher/launch.py|tests/unit/comm/test_dist.py) + exclude: ^(deepspeed/comm/|docs/|benchmarks/|scripts/check-torchdist.py|deepspeed/moe/sharded_moe.py|deepspeed/runtime/comm/coalesced_collectives.py|deepspeed/elasticity/elastic_agent.py|deepspeed/launcher/launch.py|tests/unit/comm/test_dist.py|deepspeed/runtime/zero/utils.py|deepspeed/tools/pg_sim/ut/base.py|deepspeed/tools/pg_sim/pg.py) # Specific deepspeed/ files are excluded for now until we wrap ProcessGroup in deepspeed.comm - repo: local diff --git a/accelerator/abstract_accelerator.py b/accelerator/abstract_accelerator.py index 3c5d799e293e..368843a69fa2 100644 --- a/accelerator/abstract_accelerator.py +++ b/accelerator/abstract_accelerator.py @@ -280,6 +280,10 @@ def create_op_builder(self, class_name): def get_op_builder(self, class_name): ... + @abc.abstractmethod + def get_compile_backend(self): + ... + @abc.abstractmethod def build_extension(self): ... 
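The new `get_compile_backend()` hook added to the abstract accelerator (and implemented per accelerator in the files below, returning e.g. "inductor" or "hpu_backend") lets callers pick a `torch.compile` backend without hard-coding one. A minimal sketch of how such a hook might be consumed; the `compile_for_accelerator` helper is illustrative only and is not part of this patch:

```python
# Illustrative sketch only: consuming the accelerator-provided compile backend.
# "compile_for_accelerator" is a made-up helper, not code from this PR.
import torch
from deepspeed.accelerator import get_accelerator

def compile_for_accelerator(module: torch.nn.Module) -> torch.nn.Module:
    backend = get_accelerator().get_compile_backend()  # e.g. "inductor" or "hpu_backend"
    return torch.compile(module, backend=backend)
```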
diff --git a/accelerator/cpu_accelerator.py b/accelerator/cpu_accelerator.py index b1aba75b4c5e..c3a8ff95fbed 100644 --- a/accelerator/cpu_accelerator.py +++ b/accelerator/cpu_accelerator.py @@ -302,3 +302,6 @@ def build_extension(self): def export_envs(self): return [] + + def get_compile_backend(self): + return "inductor" diff --git a/accelerator/cuda_accelerator.py b/accelerator/cuda_accelerator.py index 3d5e9c168c16..4fbf6615992d 100644 --- a/accelerator/cuda_accelerator.py +++ b/accelerator/cuda_accelerator.py @@ -360,3 +360,6 @@ def build_extension(self): def export_envs(self): return ['NCCL'] + + def get_compile_backend(self): + return "inductor" diff --git a/accelerator/hpu_accelerator.py b/accelerator/hpu_accelerator.py index 30b115e8b1ab..cdb5fc91ba35 100644 --- a/accelerator/hpu_accelerator.py +++ b/accelerator/hpu_accelerator.py @@ -288,6 +288,17 @@ def get_op_builder(self, class_name): else: return self.class_dict['NotImplementedBuilder'] if 'NotImplementedBuilder' in self.class_dict else None + def get_compile_backend(self): + return "hpu_backend" + + #shall be removed once moving to torch.compile + def wrap_in_hpu_graph(self, module): + if self.hpu.is_lazy(): + module = self.hpu.wrap_in_hpu_graph(module) + else: + print("Warning: hpu graphs in eager mode is not supported, ignoring") + return module + def build_extension(self): from torch.utils.cpp_extension import BuildExtension return BuildExtension diff --git a/accelerator/mps_accelerator.py b/accelerator/mps_accelerator.py index 972b33caece1..ae36e541f869 100644 --- a/accelerator/mps_accelerator.py +++ b/accelerator/mps_accelerator.py @@ -258,3 +258,6 @@ def build_extension(self): def export_envs(self): return [] + + def get_compile_backend(self): + return "inductor" diff --git a/accelerator/npu_accelerator.py b/accelerator/npu_accelerator.py index 472157e32c02..4912fd05f8c4 100644 --- a/accelerator/npu_accelerator.py +++ b/accelerator/npu_accelerator.py @@ -278,3 +278,6 @@ def build_extension(self): def export_envs(self): return ['ASCEND', 'HCCL', 'LD_LIBRARY', 'PATH'] + + def get_compile_backend(self): + return "inductor" diff --git a/build.txt b/build.txt new file mode 100644 index 000000000000..1a0363202d46 --- /dev/null +++ b/build.txt @@ -0,0 +1 @@ ++hpu.synapse.v1.16.1 diff --git a/csrc/adam/cpu_adam_impl.cpp b/csrc/adam/cpu_adam_impl.cpp index 9a4a8d956519..c87eeea300f6 100644 --- a/csrc/adam/cpu_adam_impl.cpp +++ b/csrc/adam/cpu_adam_impl.cpp @@ -244,13 +244,17 @@ int ds_adam_step(int optimizer_id, opt->IncrementStep(step, beta1, beta2); opt->update_state(lr, epsilon, weight_decay, bias_correction); + bool bit16_precision = false; + if ((params.options().dtype() == at::kHalf) || (params.options().dtype() == at::kBFloat16)) + bit16_precision = true; + opt->Step_8(params_ptr, grads_ptr, exp_avg_ptr, exp_avg_sq_ptr, params_c.numel(), nullptr, - (params.options().dtype() == at::kHalf)); + bit16_precision); #if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) opt->SynchronizeStreams(); diff --git a/csrc/includes/cpu_adam.h b/csrc/includes/cpu_adam.h index b1a104b2571d..05c952eb39a0 100644 --- a/csrc/includes/cpu_adam.h +++ b/csrc/includes/cpu_adam.h @@ -23,6 +23,9 @@ typedef __half ds_half_precision_t; #include "acl/acl.h" #include "torch_npu/csrc/core/npu/NPUStream.h" typedef c10::Half ds_half_precision_t; +#elif defined(__BFLOAT16__) +#include +typedef at::BFloat16 ds_half_precision_t #else #include typedef unsigned short ds_half_precision_t; @@ -259,6 +262,7 @@ void Adam_Optimizer::Step_AVX(size_t* 
rounded_size, simd_store(_exp_avg + i, momentum_4, false); simd_store(_exp_avg_sq + i, variance_4, false); } +// Params are updated only in case of float16, which is currently not supported on HPU #if defined(__ENABLE_CUDA__) if (dev_params) { if (half_precision) diff --git a/csrc/includes/simd.h b/csrc/includes/simd.h index 59237b0261c1..e69d8ca0e2a3 100644 --- a/csrc/includes/simd.h +++ b/csrc/includes/simd.h @@ -9,6 +9,23 @@ #include #include #endif +#include +#include +#include + +template +inline T readAs(const void* src) +{ + T res; + std::memcpy(&res, src, sizeof(T)); + return res; +} + +template +inline void writeAs(void* dst, const T& val) +{ + std::memcpy(dst, &val, sizeof(T)); +} #define TILE (128 * 1024 * 1024) #if defined(__AVX512__) or defined(__AVX256__) @@ -29,12 +46,58 @@ #define SIMD_OR(x, y) _mm512_or_ps(x, y) #define SIMD_XOR(x, y) _mm512_xor_ps(x, y) #define SIMD_WIDTH 16 +#if defined(ENABLE_BFLOAT16) +static __m512 load_16_bf16_as_f32(const void* data) +{ + __m256i a = readAs<__m256i>(data); // use memcpy to avoid aliasing + __m512i b = _mm512_cvtepu16_epi32(a); // convert 8 u16 to 8 u32 + __m512i c = _mm512_slli_epi32(b, 16); // logical shift left of all u32 by + // 16 bits (representing bf16->f32) + return readAs<__m512>(&c); // use memcpy to avoid aliasing +} +static void store_16_f32_as_bf16_nearest(__m512 v, void* data) +{ + __m512i u32 = readAs<__m512i>(&v); + + // flow assuming non-nan: + + // uint32_t rounding_bias = ((U32 >> 16) & 1) + UINT32_C(0x7FFF); + __m512i b = _mm512_srli_epi32(u32, 16); + __m512i lsb_mask = _mm512_set1_epi32(0x00000001); + __m512i c = _mm512_and_si512(b, lsb_mask); + __m512i bias_constant = _mm512_set1_epi32(0x00007fff); + __m512i rounding_bias = _mm512_add_epi32(c, bias_constant); + + // uint16_t res = static_cast((U32 + rounding_bias) >> 16); + __m512i d = _mm512_add_epi32(u32, rounding_bias); + __m512i e = _mm512_srli_epi32(d, 16); + __m256i non_nan_res = _mm512_cvtusepi32_epi16(e); + + // handle nan (exp is all 1s and mantissa != 0) + // if ((x & 0x7fffffffU) > 0x7f800000U) + __m512i mask_out_sign = _mm512_set1_epi32(0x7fffffff); + __m512i non_sign_bits = _mm512_and_si512(u32, mask_out_sign); + __m512i nan_threshold = _mm512_set1_epi32(0x7f800000); + __mmask16 nan_mask = _mm512_cmp_epi32_mask(non_sign_bits, nan_threshold, _MM_CMPINT_GT); + + // mix in results with nans as needed + __m256i nans = _mm256_set1_epi16(0x7fc0); + __m256i res = _mm256_mask_mov_epi16(non_nan_res, nan_mask, nans); + + writeAs(data, res); +} + +#define SIMD_LOAD2(x, h) ((h) ? load_16_bf16_as_f32(x) : _mm512_loadu_ps(x)) + +#define SIMD_STORE2(x, d, h) ((h) ? store_16_f32_as_bf16_nearest(d, x) : _mm512_storeu_ps(x, d)) +#else // ENABLE_BFLOAT16 #define SIMD_LOAD2(x, h) \ ((h) ? _mm512_cvtph_ps(_mm256_castps_si256(_mm256_loadu_ps(x))) : _mm512_loadu_ps(x)) #define SIMD_STORE2(x, d, h) \ ((h) ? 
_mm256_store_ps(x, _mm256_castsi256_ps(_mm512_cvtps_ph(d, _MM_FROUND_TO_NEAREST_INT))) \ : _mm512_storeu_ps(x, d)) +#endif // ENABLE_BFLOAT16 #define INTV __m256i #elif defined(__AVX256__) @@ -52,12 +115,57 @@ #define SIMD_XOR(x, y) _mm256_xor_ps(x, y) #define SIMD_WIDTH 8 +#if defined(ENABLE_BFLOAT16) +__m256 load_8_bf16_as_f32(const float* data) +{ + __m128i a = readAs<__m128i>(data); // use memcpy to avoid aliasing + __m256i b = _mm256_cvtepu16_epi32(a); // convert 8 u16 to 8 u32 + __m256i c = _mm256_slli_epi32(b, 16); // logical shift left of all u32 by + // 16 bits (representing bf16->f32) + return readAs<__m256>(&c); // use memcpy to avoid aliasing +} + +void store_8_f32_as_bf16_nearest(__m256 v, float* data) +{ + __m256i u32 = readAs<__m256i>(&v); + + // flow assuming non-nan: + + // uint32_t rounding_bias = ((U32 >> 16) & 1) + UINT32_C(0x7FFF); + __m256i b = _mm256_srli_epi32(u32, 16); + __m256i lsb_mask = _mm256_set1_epi32(0x00000001); + __m256i c = _mm256_and_si256(b, lsb_mask); + __m256i bias_constant = _mm256_set1_epi32(0x00007fff); + __m256i rounding_bias = _mm256_add_epi32(c, bias_constant); + + // uint16_t res = static_cast((U32 + rounding_bias) >> 16); + __m256i d = _mm256_add_epi32(u32, rounding_bias); + __m256i e = _mm256_srli_epi32(d, 16); + __m128i non_nan_res = _mm256_cvtusepi32_epi16(e); + + // handle nan (exp is all 1s and mantissa != 0) + // if ((x & 0x7fffffffU) > 0x7f800000U) + __m256i mask_out_sign = _mm256_set1_epi32(0x7fffffff); + __m256i non_sign_bits = _mm256_and_si256(u32, mask_out_sign); + __m256i nan_threshold = _mm256_set1_epi32(0x7f800000); + __mmask8 nan_mask = _mm256_cmp_epi32_mask(non_sign_bits, nan_threshold, _MM_CMPINT_GT); + + // mix in results with nans as needed + __m128i nans = _mm_set1_epi16(0x7fc0); + __m128i res = _mm_mask_mov_epi16(non_nan_res, nan_mask, nans); + + writeAs(data, res); +} +#define SIMD_LOAD2(x, h) ((h) ? load_8_bf16_as_f32(x) : _mm256_loadu_ps(x)) + +#define SIMD_STORE2(x, d, h) ((h) ? store_8_f32_as_bf16_nearest(d, x) : _mm256_storeu_ps(x, d)) +#else // ENABLE_BFLOAT16 #define SIMD_LOAD2(x, h) \ ((h) ? _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)(x))) : _mm256_loadu_ps(x)) #define SIMD_STORE2(x, d, h) \ ((h) ? _mm_store_ps(x, _mm_castsi128_ps(_mm256_cvtps_ph(d, _MM_FROUND_TO_NEAREST_INT))) \ : _mm256_storeu_ps(x, d)) - +#endif // ENABLE_BFLOAT16 #define INTV __m128i #endif diff --git a/csrc/transformer/inference/csrc/pt_binding.cpp b/csrc/transformer/inference/csrc/pt_binding.cpp index b7277d1e1678..254785b440b9 100644 --- a/csrc/transformer/inference/csrc/pt_binding.cpp +++ b/csrc/transformer/inference/csrc/pt_binding.cpp @@ -446,15 +446,15 @@ std::vector ds_softmax_context(at::Tensor& query_key_value, unsigned layer_id, unsigned num_layers, at::Tensor& alibi, - float rope_theta) + float rope_theta, + bool is_prompt, + std::optional token_idx) { unsigned bsz = query_key_value.size(0); unsigned seq_len = query_key_value.size(1); int k = query_key_value.size(2) / (heads + 2 * (num_kv > 0 ? 
num_kv : heads)); unsigned hidden_dim = heads * k; - bool is_prompt = (seq_len > 1); - if (is_prompt) InferenceContext::Instance().reset_tokens(seq_len); unsigned soft_len = InferenceContext::Instance().current_tokens(); @@ -847,6 +847,87 @@ std::vector ds_layer_norm_residual_store_pre_ln_res(at::Tensor& inpu return {norm_output, res_output}; } +template +at::Tensor ds_transform4d_0213(at::Tensor& input, int seq_length) +{ + auto input_cont = input.contiguous(); + unsigned batch_size = input.size(0); + unsigned num_heads = input.size(1); + unsigned seq_length_head_dim = input.size(2); + unsigned head_dim = seq_length_head_dim / seq_length; + unsigned hidden_dim = num_heads * head_dim; + + auto options = at::TensorOptions() + .dtype(input.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + T* workspace = (T*)InferenceContext::Instance().GetWorkSpace(); + + launch_transform4d_0213(workspace, + (T*)input.data_ptr(), + batch_size, + num_heads, + seq_length, + hidden_dim, + InferenceContext::Instance().GetCurrentStream(), + 1); + auto output = at::from_blob(workspace, {batch_size, seq_length, num_heads, head_dim}, options); + return output; +} + +template +std::vector ds_bias_add_transform_0213(at::Tensor& input, + at::Tensor& bias, + int num_heads, + int trans_count) +{ + TORCH_CHECK( + trans_count == 1 or trans_count == 3, "trans_count ", trans_count, " is not supported"); + auto input_cont = input.contiguous(); + + unsigned batch_size = input.size(0); + unsigned seq_length = input.size(1); + unsigned value_size = input.size(2); + unsigned hidden_dim = input.size(2) / trans_count; + unsigned head_dim = hidden_dim / num_heads; + + auto options = at::TensorOptions() + .dtype(input.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + T* workspace = (T*)InferenceContext::Instance().GetWorkSpace(); + auto final_output = workspace; + int num_kv = -1; + int repo_theta = -1; + size_t offset = (batch_size * seq_length * hidden_dim); + launch_bias_add_transform_0213(final_output, + final_output + offset, + final_output + 2 * offset, + (T*)input.data_ptr(), + (T*)bias.data_ptr(), + batch_size, + seq_length, + 0, // seq_offset + input.size(1), // all_tokens .. unused? 
+ hidden_dim, + num_heads, + num_kv, + -1, // rotary_dim + false, // rotate_half + false, // rotate_every_two + InferenceContext::Instance().GetCurrentStream(), + trans_count, // trans_count + input.size(1), // max_out_tokens + repo_theta); + return {at::from_blob(final_output, {batch_size, num_heads, seq_length, head_dim}, options), + at::from_blob( + final_output + offset, {batch_size, num_heads, seq_length, head_dim}, options), + at::from_blob( + final_output + 2 * offset, {batch_size, num_heads, seq_length, head_dim}, options)}; +} + template void quantized_gemm(void* output, T* input, @@ -2010,7 +2091,13 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) "DeepSpeed memory allocation for GPT inference with " #_name " (CUDA)"); \ m.def("dequantize_" #_name, \ &ds_dequantize<_dtype>, \ - "DeepSpeed dequantize with " #_name " (CUDA)") + "DeepSpeed dequantize with " #_name " (CUDA)"); \ + m.def("transform4d_0213_" #_name, \ + &ds_transform4d_0213<_dtype>, \ + "DeepSpeed transform4d 0213 with " #_name " (CUDA)"); \ + m.def("bias_add_transform_0213_" #_name, \ + &ds_bias_add_transform_0213<_dtype>, \ + "DeepSpeed bias and transform 0213 with " #_name " (CUDA)") DEF_OPS(fp32, float); DEF_OPS(fp16, __half); diff --git a/deepspeed/__init__.py b/deepspeed/__init__.py index 6c7aa8b15ef9..f456bcd2752c 100755 --- a/deepspeed/__init__.py +++ b/deepspeed/__init__.py @@ -22,6 +22,7 @@ else: HAS_TRITON = False +from .utils import log_dist, OnDevice, logger from . import ops from . import module_inject @@ -38,11 +39,9 @@ from .ops.transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig from .module_inject import replace_transformer_layer, revert_transformer_layer -from .utils import log_dist, OnDevice, logger from .comm.comm import init_distributed from .runtime import zero -from .runtime import DeepSpeedOptimizer, ZeROOptimizer from .runtime.compiler import is_compile_supported from .pipe import PipelineModule diff --git a/deepspeed/checkpoint/__init__.py b/deepspeed/checkpoint/__init__.py index c9822693867d..1f645a494ade 100644 --- a/deepspeed/checkpoint/__init__.py +++ b/deepspeed/checkpoint/__init__.py @@ -15,6 +15,6 @@ from .zero_checkpoint import ZeROCheckpoint -from .universal_checkpoint import enable_universal_checkpoint +from .universal_checkpoint import enable_universal_checkpoint, SubparamShape from .constants import * diff --git a/deepspeed/checkpoint/constants.py b/deepspeed/checkpoint/constants.py index f809a0c39270..70d54f85e5f3 100644 --- a/deepspeed/checkpoint/constants.py +++ b/deepspeed/checkpoint/constants.py @@ -16,6 +16,7 @@ BASE_OPTIMIZER_STATE = 'base_optimizer_state' BASE_OPTIMIZER_STATE_STEP = 'base_optimizer_state_step' SINGLE_PARTITION_OF_FP32_GROUPS = "single_partition_of_fp32_groups" +PARAM_GROUPS = 'param_groups' GROUP_PADDINGS = 'group_paddings' PARTITION_COUNT = 'partition_count' ZERO_STAGE = 'zero_stage' @@ -58,6 +59,8 @@ # Vocabulary padding VOCAB_TENSOR = 'vocab_tensor' +# TODO: remove once transition to new universal checkpoint is completed +VOCAB_DIVISIBILITY_PADDING_TENSOR = 'vocab_divisibility_padding_tensor' PADDED_VOCAB_SIZE = 'padded_vocab_size' ORIGINAL_VOCAB_SIZE = 'original_vocab_size' @@ -73,6 +76,8 @@ # Similarly, load_hp_checkpoint_state has to take the needed actions when loading from universal. 
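For reference, the AVX helpers added to csrc/includes/simd.h earlier in this diff convert float32 to bfloat16 with round-to-nearest (ties to even) plus a quiet-NaN fixup. A scalar Python sketch of the same bit manipulation, for illustration only:

```python
# Scalar illustration of the rounding-bias trick used by store_*_f32_as_bf16_nearest;
# not code from this PR.
import struct

def f32_to_bf16_bits(x: float) -> int:
    u = struct.unpack("<I", struct.pack("<f", x))[0]
    if (u & 0x7fffffff) > 0x7f800000:           # NaN: exponent all ones, mantissa != 0
        return 0x7fc0                           # quiet NaN, as in the SIMD path
    rounding_bias = ((u >> 16) & 1) + 0x7fff    # round to nearest, ties to even
    return ((u + rounding_bias) >> 16) & 0xffff

print(hex(f32_to_bf16_bits(1.0)))      # 0x3f80
print(hex(f32_to_bf16_bits(3.14159)))  # 0x4049 (~3.140625)
```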
PARAM_N_SUB_PARAMS = "param_n_sub_params" +SUB_PARAM_SHAPE = "sub_param_shape" + # Regex list of parameters that require special handling VOCABULARY_PARAMETER_PATTERNS = 'vocabulary_parameter_patterns' PIPELINE_REPLICATED_PARAMETER_PATTERNS = 'pipeline_replicated_parameter_patterns' @@ -80,3 +85,5 @@ PARAMETER_WITH_ROW_PARALLELISM_PATTERNS = 'parameter_with_row_parallelism_patterns' TP_REPLICATED_PARAMETER_PATTERNS = 'tp_replicated_parameter_patterns' PARAMETER_WITH_2_SUB_PARAMS_CAT_DIM_0 = 'parameter_with_2_sub_params_cat_dim_0' +PARAMETER_WITH_SUB_PARAMS = 'parameter_with_sub_params' +SUB_PARAMS_SHAPE = 'sub_params_shape' diff --git a/deepspeed/checkpoint/deepspeed_checkpoint.py b/deepspeed/checkpoint/deepspeed_checkpoint.py index 8312dddd2fa6..c2d6f475e979 100644 --- a/deepspeed/checkpoint/deepspeed_checkpoint.py +++ b/deepspeed/checkpoint/deepspeed_checkpoint.py @@ -4,6 +4,7 @@ # DeepSpeed Team import os +import re from typing import Dict import torch @@ -32,7 +33,13 @@ class DeepSpeedCheckpoint(object): - def __init__(self, dir, tp_degree=None, pp_degree=None, dp_degree=None): + def __init__(self, + dir, + tp_degree=None, + pp_degree=None, + dp_degree=None, + final_layer_norm_idx=FINAL_LAYER_NORM_INDEX): + self.final_layer_norm_idx = final_layer_norm_idx self.dir = dir pipeline_parallel = len(get_files_with_prefix(get_files(dir), LAYER_FILE_PREFIX)) > 0 @@ -73,7 +80,7 @@ def __init__(self, dir, tp_degree=None, pp_degree=None, dp_degree=None): self.pp_to_transformer_map = self._build_pp_transformer_map() self.transformer_file_map = self._build_transformer_file_map() self.tp_to_embedding_map = self._build_tp_other_layer_map(EMBEDDING_LAYER_INDEX) - self.tp_to_final_norm_map = self._build_tp_other_layer_map(FINAL_LAYER_NORM_INDEX) + self.tp_to_final_norm_map = self._build_tp_other_layer_map(self.final_layer_norm_idx) self._build_global_state() def is_change_tp_degree(self): @@ -125,7 +132,7 @@ def get_embedding_layer_id(self): return self.layer_keys[EMBEDDING_LAYER_INDEX] def get_final_norm_layer_id(self): - return self.layer_keys[FINAL_LAYER_NORM_INDEX] + return self.layer_keys[self.final_layer_norm_idx] def get_iteration(self): if not ITERATION_KEY in self.global_state: @@ -214,7 +221,7 @@ def get_2d_parallel_files(self, tp_index: int, pp_index: int) -> list: def _build_pp_transformer_map(self): data_map = {} if self.pp_degree > 0: - transformer_layers = self.layer_keys[1:-1] + transformer_layers = self.layer_keys[1:self.final_layer_norm_idx] layers_per_pp = len(transformer_layers) // self.pp_degree data_map = { i: transformer_layers[i * layers_per_pp:(i + 1) * layers_per_pp] @@ -229,7 +236,7 @@ def _dump_mapping(self, data_map, map_tag=None): print(f'{k} = {v}') def _build_transformer_file_map(self): - transformer_layer_keys = self.layer_keys[1:-1] + transformer_layer_keys = self.layer_keys[1:self.final_layer_norm_idx] file_map = {} # XXX: this is not guaranteed layers_per_pp = 1 @@ -238,7 +245,7 @@ def _build_transformer_file_map(self): #print(f"{transformer_layer_keys} {layers_per_pp}") for key_index, layer_key in enumerate(transformer_layer_keys): pp_index = key_index // layers_per_pp - layer_files = get_files_with_prefix(self.layer_files, layer_key) + layer_files = get_files_with_prefix(self.layer_files, layer_key + '-') layer_file_partitions = partition_data(layer_files, self.tp_degree) for tp_index in range(self.tp_degree): map_key = (tp_index, pp_index) @@ -263,11 +270,13 @@ def validate_files(self): def _get_layer_keys(self): key_set = set() - key_len = len(LAYER_FILE_PREFIX) 
+ 2 for file_path in self.layer_files: _, fname = os.path.split(file_path) - key_set.add(fname[:key_len]) - return sorted(list(key_set)) + layer_id = re.search(r'layer_(\d+)-model_.*', fname).group(1) + key_set.add(layer_id) + sorted_ids = sorted(list(key_set), key=int) + layer_keys = [LAYER_FILE_PREFIX + str(layer_id) for layer_id in sorted_ids] + return layer_keys def _merge_state_dicts(self, sd_list): merged_sd = {} diff --git a/deepspeed/checkpoint/ds_to_universal.py b/deepspeed/checkpoint/ds_to_universal.py index f40c5630899d..b1a8276589b6 100755 --- a/deepspeed/checkpoint/ds_to_universal.py +++ b/deepspeed/checkpoint/ds_to_universal.py @@ -6,10 +6,11 @@ # DeepSpeed Team from functools import partial +from itertools import chain import argparse import glob import itertools -import multiprocessing +from concurrent.futures import ProcessPoolExecutor import os import re import shutil @@ -22,11 +23,13 @@ OPTIMIZER_STATE_DICT, BASE_OPTIMIZER_STATE, SINGLE_PARTITION_OF_FP32_GROUPS, + PARAM_GROUPS, PARAM_SLICE_MAPPINGS, PARAM_SHAPES, PARAM, CAT_DIM, PARAM_N_SUB_PARAMS, + SUB_PARAM_SHAPE, VOCAB_TENSOR, UNIVERSAL_CHECKPOINT_INFO, VOCABULARY_PARAMETER_PATTERNS, @@ -35,6 +38,8 @@ PARAMETER_TO_AVERAGE_PATTERNS, PARAMETER_WITH_ROW_PARALLELISM_PATTERNS, PARAMETER_WITH_2_SUB_PARAMS_CAT_DIM_0, + PARAMETER_WITH_SUB_PARAMS, + SubparamShape, ) @@ -110,6 +115,9 @@ def extract_zero_shards(dir, ds_checkpoint, indices_3D): fp32=fp32_groups[param_group_id], ) + if "step" in state_groups[param_group_id]: + flat_state["step"] = state_groups[param_group_id]["step"] + for name, fragment_mapping in param_slice_mappings[param_group_id].items(): if pp_index > 0 and any(re.match(pattern, name) for pattern in pipeline_replicated_params): # Skip tied weights that are replicated in first and last pp stages @@ -124,6 +132,10 @@ def extract_zero_shards(dir, ds_checkpoint, indices_3D): cnt = 0 +def dp_index_to_str(dp_index): + return f"{dp_index:0>2d}" + + def dump_param_fragment(dir, tp_index, dp_index, state_name, state_flat_tensor, param_name, offset, numel): global cnt # temp hack @@ -132,23 +144,44 @@ def dump_param_fragment(dir, tp_index, dp_index, state_name, state_flat_tensor, os.makedirs(param_base_path, exist_ok=True) cnt += 1 - counter = f"{dp_index:0>2d}" - path = os.path.join(param_base_path, f"{state_name}.{counter}") + path = os.path.join(param_base_path, f"{state_name}.{dp_index_to_str(dp_index)}") #print(f"{param_name}: {offset}: {numel} => {path}") - t = state_flat_tensor.narrow(0, offset, numel).clone() - _save_checkpoint(path, t) + # State might be a python int or a tensor + if state_name != "step" and torch.is_tensor(state_flat_tensor): + state_flat_tensor = state_flat_tensor.narrow(0, offset, numel).clone() + _save_checkpoint(path, state_flat_tensor) def _merge_zero_shards(param_base_path, state, tp_degree, slice_shape): slices = [] for tp_index in range(tp_degree): prefix_path = os.path.join(param_base_path, str(tp_index), f"{state}") - paths = sorted(list(glob.glob(f"{prefix_path}.*"))) + paths = glob.glob(f"{prefix_path}.*") + + if len(paths) == 0: + continue + + pattern = re.compile(f"{prefix_path}\\.([0-9]+)") + dp_indices = set() + for p in paths: + m = pattern.match(p) + if m: + dp_indices.add(int(m.group(1))) + else: + raise ValueError(f"Cannot parse dp_rank from {p}") + + paths = [f"{prefix_path}.{dp_index_to_str(dp_index)}" for dp_index in sorted(list(dp_indices))] shards = [torch.load(p) for p in paths] - slice = torch.cat(shards, dim=0).reshape(slice_shape) + + if state == "step": + 
assert all(v == shards[0] for v in shards), "All shards must have the same step value" + slice = shards[0] + else: + slice = torch.cat(shards, dim=0).reshape(slice_shape) + slices.append(slice) return slices @@ -165,8 +198,11 @@ def merge_tp_slices(ds_checkpoint, dir, slice_dir, tp_degree, name_and_shape): parameters_with_row_parallelism = universal_checkpoint_info.get(PARAMETER_WITH_ROW_PARALLELISM_PATTERNS, []) vocabulary_parameters = universal_checkpoint_info.get(VOCABULARY_PARAMETER_PATTERNS, []) parameters_with_2_sub_params_cat_dim_0 = universal_checkpoint_info.get(PARAMETER_WITH_2_SUB_PARAMS_CAT_DIM_0, []) + parameter_with_sub_params = universal_checkpoint_info.get(PARAMETER_WITH_SUB_PARAMS, []) + unmatched_patterns = set(replicated_parameters + parameters_to_average + parameters_with_row_parallelism + vocabulary_parameters + parameters_with_2_sub_params_cat_dim_0) + unmatched_patterns.update(chain.from_iterable(SubparamShape(**s).patterns for s in parameter_with_sub_params)) def get_matched_pattern(patterns_, name_): matched_ = [pattern_ for pattern_ in patterns_ if re.match(pattern_, name_)] @@ -177,6 +213,21 @@ def get_matched_pattern(patterns_, name_): return pattern_ return None + def get_matched_sub_params_pattern(name_): + for subparam_shape_dict in parameter_with_sub_params: + subparam_shape = SubparamShape(**subparam_shape_dict) + for pattern_ in subparam_shape.patterns: + if re.match(pattern_, name_): + unmatched_patterns.discard(pattern_) + return subparam_shape + return None + + matched_sub_params_shape = get_matched_sub_params_pattern(name) + + step_merged = _merge_zero_shards(slice_base_path, "step", tp_degree, shape) + if step_merged: + _save_checkpoint(os.path.join(param_base_path, f"step.pt"), step_merged[0]) + for state in ("fp32", "exp_avg", "exp_avg_sq"): slices = _merge_zero_shards(slice_base_path, state, tp_degree, shape) final_path = os.path.join(param_base_path, f"{state}.pt") @@ -200,6 +251,26 @@ def get_matched_pattern(patterns_, name_): param = torch.cat([merged_chunks_0, merged_chunks_1], dim=cat_dim) ckpt_dict[CAT_DIM] = cat_dim ckpt_dict[PARAM_N_SUB_PARAMS] = 2 + elif matched_sub_params_shape: + merged_chunks = [] + partition_dim = matched_sub_params_shape.partition_dim + + sub_dim_sizes = matched_sub_params_shape.shape[partition_dim] + if not isinstance(sub_dim_sizes, tuple): + sub_dim_sizes = (sub_dim_sizes, ) + + partition_shape = [sum(d) if isinstance(d, tuple) else d for d in matched_sub_params_shape.shape] + partition_shape = [d // tp_degree if i == partition_dim else d for i, d in enumerate(partition_shape)] + slices = [s.view(partition_shape) for s in slices] + + offset = 0 + for sub_dim_size in sub_dim_sizes: + part_sub_dim_size = sub_dim_size // tp_degree + merged_chunks.append( + torch.cat([s.narrow(partition_dim, offset, part_sub_dim_size) for s in slices], dim=partition_dim)) + offset += part_sub_dim_size + param = torch.cat(merged_chunks, dim=partition_dim) + ckpt_dict[SUB_PARAM_SHAPE] = matched_sub_params_shape else: cat_dim = 1 if get_matched_pattern(parameters_with_row_parallelism, name) else 0 # print(f"merge {name} with CAT DIM: {cat_dim}") @@ -221,19 +292,18 @@ def get_matched_pattern(patterns_, name_): return unmatched_patterns -def _get_chunks(l, n): - for i in range(0, len(l), n): - yield l[i:i + n] - - def _do_parallel_work(do_work, work_chunks, num_workers): - pool = multiprocessing.Pool(num_workers) results = [] - for batch in tqdm.tqdm(work_chunks): - res = pool.map(do_work, batch) - results.extend(res) - pool.close() - 
pool.join() + if num_workers > 1: + with ProcessPoolExecutor(max_workers=num_workers) as executor: + future_list = [executor.submit(do_work, work) for work in work_chunks] + for f in tqdm.tqdm(future_list): + results.append(f.result()) + else: + # No parallel pass for unit testing + # We can't create child processes in tests + for work in tqdm.tqdm(work_chunks): + results.append(do_work(work)) return results @@ -242,20 +312,15 @@ def _extract_zero_shard_files(args, ds_checkpoint, temp_dir): itertools.product(range(ds_checkpoint.pp_degree), range(ds_checkpoint.tp_degree), range(ds_checkpoint.dp_degree))) #pprint(f'{_3d_range_list=}') - work_chunks = list(_get_chunks(_3d_range_list, args.num_extract_workers)) - #pprint(f'{work_chunks=}') - # extract_zero_shards(temp_dir, ds_checkpoint, _3d_range_list[0]) do_work = partial(extract_zero_shards, temp_dir, ds_checkpoint) - _do_parallel_work(do_work, work_chunks, args.num_extract_workers) + _do_parallel_work(do_work, _3d_range_list, args.num_extract_workers) def _merge_tp_slice_files(args, ds_checkpoint, slice_shapes, temp_dir): - work_chunks = list(_get_chunks(list(slice_shapes.items()), args.num_merge_workers)) - #pprint(work_chunks) zero_output_folder = os.path.join(args.output_folder, "zero") do_work = partial(merge_tp_slices, ds_checkpoint, zero_output_folder, temp_dir, ds_checkpoint.tp_degree) - unmatched_patterns_lists = _do_parallel_work(do_work, work_chunks, args.num_merge_workers) + unmatched_patterns_lists = _do_parallel_work(do_work, list(slice_shapes.items()), args.num_merge_workers) # verify that all patterns were used # if a pattern was not used by any of the workers, then it was not used at all -> assert/alert @@ -273,6 +338,7 @@ def _save_optimizer_state(args, ds_checkpoint): optim_sd = sd[OPTIMIZER_STATE_DICT] output_sd = {k: v for k, v in optim_sd.items() if k not in sharded_states} + output_sd[PARAM_GROUPS] = optim_sd[BASE_OPTIMIZER_STATE][PARAM_GROUPS] zero_output_folder = os.path.join(args.output_folder, "zero") output_file_path = os.path.join(zero_output_folder, f"optimizer_state.pt") _save_checkpoint(output_file_path, output_sd) @@ -283,10 +349,9 @@ def _check_for_required_state(ds_checkpoint): assert universal_checkpoint_info is not None, f'Required {UNIVERSAL_CHECKPOINT_INFO} state is missing in checkpoint. Verify that client creates this state.' 
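The PARAMETER_WITH_SUB_PARAMS / SubparamShape handling above covers parameters that pack several sub-parameters into one tensor and are tensor-parallel partitioned along a single dimension. A hedged sketch of what one such descriptor could look like; the regex pattern and sizes are invented for illustration:

```python
# Illustrative only: the pattern and sizes below are assumptions, not values from this patch.
from deepspeed.checkpoint import SubparamShape  # exported via deepspeed/checkpoint/__init__.py above

# A fused weight made of two 4096-row sub-params concatenated on dim 0 and
# tensor-parallel partitioned on that same dim:
fused_shape = SubparamShape(
    patterns=[r".*attention\.query_key_value\.weight"],
    shape=((4096, 4096), 11008),
    partition_dim=0,
)
# merge_tp_slices() sums the tuple to get the full dim (8192 here), carves each rank's
# slice into per-sub-param shards, and concatenates matching shards across ranks.
```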
-def main(): +def main(args): print(f'Convert DeepSpeed Checkpoint to Universal Checkpoint') - args = parse_arguments() print(f'Converting DeepSpeed checkpoint in {args.input_folder} to Universal checkpoint in {args.output_folder}') ds_checkpoint = DeepSpeedCheckpoint(args.input_folder) @@ -332,4 +397,5 @@ def main(): if __name__ == "__main__": - main() + args = parse_arguments() + main(args) diff --git a/deepspeed/checkpoint/reshape_utils.py b/deepspeed/checkpoint/reshape_utils.py index 15b6ce28b2fd..137607721ebf 100644 --- a/deepspeed/checkpoint/reshape_utils.py +++ b/deepspeed/checkpoint/reshape_utils.py @@ -4,9 +4,10 @@ # DeepSpeed Team import os +import re import torch from collections import OrderedDict -from .constants import (ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX) +from .constants import (ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX, MODEL_FILE_PREFIX) def basic_folder_validation(dir): @@ -38,12 +39,28 @@ def get_files(dir): return file_list +def sort_zero_files(files, prefix): + pattern = f"{prefix}([0-9]+)_{MODEL_FILE_PREFIX}([0-9]+)" + rank_pairs = [] + for f in files: + m = re.search(pattern, f) + if m: + dp_rank = int(m.group(1)) + mp_rank = int(m.group(2)) + rank_pairs.append((dp_rank, mp_rank, f)) + else: + raise ValueError(f"Cannot parse dp_rank and mp_rank from {f}") + + sorted_files = sorted(rank_pairs, key=lambda x: (x[0], x[1])) + return [f for _, _, f in sorted_files] + + def get_zero_files(dir): file_list = get_files(dir) for prefix in [ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX]: zero_files = get_files_with_prefix(file_list, prefix) if len(zero_files) > 0: - return zero_files + return sort_zero_files(zero_files, prefix) return [] diff --git a/deepspeed/checkpoint/universal_checkpoint.py b/deepspeed/checkpoint/universal_checkpoint.py index 542d1125c566..50f8e4a1cd4e 100644 --- a/deepspeed/checkpoint/universal_checkpoint.py +++ b/deepspeed/checkpoint/universal_checkpoint.py @@ -4,23 +4,44 @@ # DeepSpeed Team import os +import re import torch import types -from .constants import (FP32_WEIGHT_KEY, PARAM, VOCAB_TENSOR, CAT_DIM, PARAM_N_SUB_PARAMS) +# todo: remove VOCAB_DIVISIBILITY_PADDING_TENSOR once transition to universal checkpointing is completed +from typing import List, Tuple, Union +from dataclasses import dataclass +from .constants import (FP32_WEIGHT_KEY, PARAM, VOCAB_TENSOR, CAT_DIM, PARAM_N_SUB_PARAMS, SUB_PARAM_SHAPE, + VOCAB_DIVISIBILITY_PADDING_TENSOR) + + +@dataclass +class SubparamShape: + patterns: List[str] + shape: Tuple[Union[Tuple[int], int]] + partition_dim: int def load_hp_checkpoint_state(self, folder, tp_rank, tp_world_size): hp_mapping = self._hp_mapping - optim_state_keys = hp_mapping.get_optim_state_keys() - hp_keys = [FP32_WEIGHT_KEY] + optim_state_keys - #print(f'{hp_keys=}') - checkpoint_files = {key: os.path.join(folder, f"{key}.pt") for key in hp_keys} - for file in checkpoint_files.values(): - assert os.path.isfile(file), f'{file} is not a valid file' + hp_mapping.optim_fragment = {} + hp_keys = [] + for file in os.listdir(folder): + # We expect files named something like "exp_avg.pt", "exp_avg_sq.pt", "fp32.pt" + pattern = r'(.+).pt' + match = re.search(pattern, file) + if match: + hp_keys.append(match.group(1)) + + step = None for key in hp_keys: - ckpt_file = checkpoint_files[key] + ckpt_file = os.path.join(folder, f"{key}.pt") ckpt_dict = torch.load(ckpt_file) + + if key == "step": + step = ckpt_dict + continue + full_hp_param = ckpt_dict[PARAM] # need to deal with slices 
that were averaged. @@ -54,6 +75,23 @@ def load_hp_checkpoint_state(self, folder, tp_rank, tp_world_size): padding_size = padded_target_vocab_size - full_hp_param.shape[0] full_hp_param = torch.nn.functional.pad(full_hp_param, (0, 0, 0, padding_size), "constant", 0) + # TODO: For BWD compatibility with old universal checkpointing mechanism, remove once transition is completed + vocab_divisibility_padding_tensor = ckpt_dict.get(VOCAB_DIVISIBILITY_PADDING_TENSOR, None) + if vocab_divisibility_padding_tensor is not None: + # In the absence of data passed from the user wrt new padded vocab specific to tp degree + # we can again derive that data by reverse engineering the target shapes like so: + padded_target_vocab_size = self.shape[0] * tp_world_size + if padded_target_vocab_size > full_hp_param.shape[0]: + # Need to expand + pad_size = padded_target_vocab_size - full_hp_param.shape[0] + hidden_size = vocab_divisibility_padding_tensor.shape[-1] + padding_tensor = vocab_divisibility_padding_tensor.view(1, -1).expand(pad_size, hidden_size) + full_hp_param = torch.nn.functional.pad(full_hp_param, (0, 0, 0, pad_size), "constant", 0) + full_hp_param[-pad_size:, :] = padding_tensor + else: + # Need to shrink or keep the same + full_hp_param = full_hp_param[:padded_target_vocab_size, :] + full_param_numel = full_hp_param.numel() tp_slice_numel = self.numel() # if key == FP32_WEIGHT_KEY and 'word_embeddings.weight' in folder: @@ -62,17 +100,36 @@ def load_hp_checkpoint_state(self, folder, tp_rank, tp_world_size): assert full_param_numel == tp_world_size * tp_slice_numel, \ f'Loading {ckpt_file} full param numel {full_param_numel} != tensor slice numel {tp_slice_numel} * tp_world_size {tp_world_size}' - dst_tensor = hp_mapping.hp_fragment if key == FP32_WEIGHT_KEY else hp_mapping.get_optim_state_fragment(key) # print(f"{full_hp_param.shape=} {full_param_numel=} {folder=}") # print(f"{dst_tensor.shape=} {dst_tensor.numel()=}{folder=}") + sub_param_shape = ckpt_dict.get(SUB_PARAM_SHAPE, None) # since when we do many to 1 on tp we cat sometimes on dim=0 and other times on dim=1 we have to do exactly the same in reverse # special case is when a single parameter is effectively a container for multiple sub parameters # (more details at PARAM_N_SUB_PARAMS definition) chunk_dim = ckpt_dict.get(CAT_DIM, 0) n_sub_params = ckpt_dict.get(PARAM_N_SUB_PARAMS, 1) - if n_sub_params > 1: + if sub_param_shape: + partition_dim = sub_param_shape.partition_dim + sub_dim_sizes = sub_param_shape.shape[partition_dim] + if not isinstance(sub_dim_sizes, tuple): + sub_dim_sizes = (sub_dim_sizes, ) + + partition_shape = [sum(d) if isinstance(d, tuple) else d for d in sub_param_shape.shape] + full_hp_param = full_hp_param.view(partition_shape) + + offset = 0 + merged_chunks = [] + for sub_dim_size in sub_dim_sizes: + sub_params_tp_slice = full_hp_param.narrow(partition_dim, + offset, sub_dim_size).chunk(tp_world_size, + dim=partition_dim)[tp_rank] + merged_chunks.append(sub_params_tp_slice) + offset += sub_dim_size + tp_hp_slice = torch.cat(merged_chunks, dim=partition_dim) + + elif n_sub_params > 1: sub_params = full_hp_param.chunk(n_sub_params, dim=chunk_dim) sub_params_tp_slice = [p.chunk(tp_world_size, dim=chunk_dim)[tp_rank] for p in sub_params] tp_hp_slice = torch.cat(sub_params_tp_slice, dim=chunk_dim) @@ -84,13 +141,23 @@ def load_hp_checkpoint_state(self, folder, tp_rank, tp_world_size): lp_frag_address = hp_mapping.lp_fragment_address tp_hp_fragment = tp_hp_slice.narrow(0, lp_frag_address.start, lp_frag_address.numel) - 
assert dst_tensor.numel() == lp_frag_address.numel, \ - f'Load checkpoint {key} dst_tensor numel {dst_tensor.numel()} != src numel {lp_frag_address.numel}' # print(f"{key} SHAPE: {tp_hp_slice.shape=}") # print(f"{key} SHAPE: {dst_tensor.shape=}") # print(f"{key} SHAPE: {tp_hp_fragment.shape=}") - dst_tensor.data.copy_(tp_hp_fragment.data) + + if key == FP32_WEIGHT_KEY: + dst_tensor = hp_mapping.get_hp_fragment() + assert dst_tensor.numel() == lp_frag_address.numel, \ + f'Load checkpoint {key} dst numel {dst_tensor.numel()} != src numel {lp_frag_address.numel}' + dst_tensor.data.copy_(tp_hp_fragment.data) + else: + assert tp_hp_fragment.numel() == lp_frag_address.numel, \ + f'Load checkpoint {key} dst numel {tp_hp_fragment.numel()} != src numel {lp_frag_address.numel}' + + hp_mapping.optim_fragment[key] = tp_hp_fragment.clone().detach() + + return step def enable_universal_checkpoint(param_list): diff --git a/deepspeed/checkpoint/zero_checkpoint.py b/deepspeed/checkpoint/zero_checkpoint.py index c65745d3dd0c..c5d4102859fb 100644 --- a/deepspeed/checkpoint/zero_checkpoint.py +++ b/deepspeed/checkpoint/zero_checkpoint.py @@ -105,9 +105,11 @@ def _strip_tensor_paddings(self, sd): if group_paddings[key] == 0: continue for state_name, state_value in group_state.items(): - if torch.is_tensor(state_value): + if state_name != "step" and torch.is_tensor(state_value) and state_value.dim(): raw_length = state_value.numel() - group_paddings[key] group_state[state_name] = torch.narrow(state_value, 0, 0, raw_length).clone() + else: + group_state[state_name] = state_value def _clear_group_paddings(self, sd): group_paddings = self._get_optimizer_state(sd, GROUP_PADDINGS) diff --git a/deepspeed/comm/torch.py b/deepspeed/comm/torch.py index d96c63d84e60..5f51165f6075 100644 --- a/deepspeed/comm/torch.py +++ b/deepspeed/comm/torch.py @@ -146,11 +146,7 @@ def has_reduce_scatter_tensor(self): def init_process_group(self, backend, timeout, init_method, rank, world_size): if not torch.distributed.is_initialized(): - torch.distributed.init_process_group(backend, - timeout=timeout, - init_method=init_method, - rank=rank, - world_size=world_size) + torch.distributed.init_process_group(backend, timeout=timeout, init_method=init_method) self.using_mpi = torch.distributed.get_backend() == 'mpi' @compiler.disable diff --git a/deepspeed/inference/engine.py b/deepspeed/inference/engine.py index 5cdd99ff0b90..1630756bece9 100755 --- a/deepspeed/inference/engine.py +++ b/deepspeed/inference/engine.py @@ -29,6 +29,7 @@ from ..module_inject.auto_tp_model_utils import build_bloom_alibi_tensor, build_mpt_atten_bias_tensor, build_mpt_alibi_tensor, get_alibi_mask from ..ops.transformer.inference.ds_attention import DeepSpeedSelfAttention from ..model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference +from ..ops.transformer.inference.op_binding.workspace import WorkspaceOp DS_INFERENCE_ENABLED = False from torch import nn @@ -51,13 +52,8 @@ def __init__(self, model, config): DS_INFERENCE_ENABLED = True super().__init__() - - # Have to import here because inference_module is a global, but python - # globals only work at the module level and will not be updated unless - # we import it each time we init a new inference engine. 
- from ..model_implementations.transformers.ds_transformer import inference_module - if inference_module is not None: - self.destroy() + self.workspace = WorkspaceOp() + self.destroy() self.module = model self._config = config @@ -180,22 +176,18 @@ def __init__(self, model, config): dist.broadcast(_rng_state, 0) get_accelerator().set_rng_state(_rng_state.cpu()) - if config.tensor_parallel.tp_size > 1: + if config.enable_cuda_graph and get_accelerator().device_name() == 'hpu': + self.module = get_accelerator().wrap_in_hpu_graph(self.module) + elif config.tensor_parallel.tp_size > 1: assert not config.enable_cuda_graph, "Cuda graph is not supported for model parallelism" # Check if local CUDA graphs can be created in replacement modules self.local_cuda_graph = self._local_cuda_graph_used(self.module) def destroy(self): - # Have to import here because inference_module is a global, but python - # globals only work at the module level and will not be updated unless - # we import it each time we init a new inference engine. - from ..model_implementations.transformers.ds_transformer import inference_module DeepSpeedTransformerInference.layer_id = 0 DeepSpeedSelfAttention.num_layers = 0 - if inference_module is not None: - inference_module.release_workspace() - inference_module = None + self.workspace.release_workspace() def profile_model_time(self, use_cuda_events=True): if not self.model_profile_enabled and not self._config.enable_cuda_graph: @@ -321,7 +313,7 @@ def _validate_args(self, mpu, replace_with_kernel_inject): if self._config.checkpoint is not None and not isinstance(self._config.checkpoint, (str, dict)): raise ValueError(f"checkpoint must be None, str or dict, got {type(self._config.checkpoint)}") - supported_dtypes = [None, torch.half, torch.int8, torch.float] + supported_dtypes = [None, torch.half, torch.int8, torch.float, torch.bfloat16] if self._config.dtype not in supported_dtypes: raise ValueError(f"{self._config.dtype} not supported, valid dtype: {supported_dtypes}") @@ -589,7 +581,8 @@ def forward(self, *inputs, **kwargs): **kwargs: variable length keyword arguments """ start = None - if self.model_profile_enabled and get_accelerator().device_name() == 'cuda' and self._config.enable_cuda_graph: + if self.model_profile_enabled and (get_accelerator().device_name() == 'cuda' or get_accelerator().device_name() == 'hpu') and \ + self._config.enable_cuda_graph: get_accelerator().synchronize() start = time.time() diff --git a/deepspeed/launcher/runner.py b/deepspeed/launcher/runner.py index 0ca89dd0497a..03b4983a5ed8 100755 --- a/deepspeed/launcher/runner.py +++ b/deepspeed/launcher/runner.py @@ -31,7 +31,10 @@ from deepspeed.accelerator import get_accelerator DLTS_HOSTFILE = "/job/hostfile" -EXPORT_ENVS = ['MLFLOW', 'PYTHON', 'MV2', 'UCX'] +EXPORT_ENVS = [ + 'MLFLOW', 'NCCL', 'PYTHON', 'MV2', 'UCX', 'PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES', 'PT_HPU_LAZY_ACC_PAR_MODE', + 'PT_HPU_ENABLE_WEIGHT_CPU_PERMUTE' +] EXPORT_ENVS += NEBULA_EXPORT_ENVS DEEPSPEED_ENVIRONMENT_NAME = os.getenv("DS_ENV_FILE", ".deepspeed_env") DEEPSPEED_ENVIRONMENT_PATHS = [os.path.expanduser("~"), '.'] @@ -483,6 +486,27 @@ def main(args=None): if args.elastic_training: assert not args.no_local_rank, "--no_local_rank argument is not supported in Elastic training" + if get_accelerator().device_name() == 'hpu': + # TODO: SW-113485 need to remove the below WA once SW-113485 is unblocked + def update_wa_env_var(key, value): + if key not in os.environ.keys(): + env[key] = value + + 
update_wa_env_var("PT_HPU_LAZY_ACC_PAR_MODE", "0") + # todo SW-125782: remove DYNAMIC SHAPE disable WA + update_wa_env_var("PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES", "0") + # todo SW-145489: remove WEIGHT CPU PERMUTE WA after SW-145491 is resolved + update_wa_env_var("PT_HPU_ENABLE_WEIGHT_CPU_PERMUTE", "0") + # determine available modules + try: + hl_smi = subprocess.check_output(['hl-smi', "-L"]) + num_accelerators = re.findall(r"Module ID\s+:\s+(\d+)", hl_smi.decode()) + except FileNotFoundError: + sim_list = subprocess.check_output(['ls', '-1', '/dev/accel']) + num_accelerators = re.findall(r"accel(\d+)", sim_list.decode()) + num_accelerators = sorted(num_accelerators, key=int) + update_wa_env_var("HABANA_VISIBLE_MODULES", ",".join(num_accelerators)) + # encode world info as base64 to make it easier to pass via command line world_info_base64 = encode_world_info(active_resources) diff --git a/deepspeed/model_implementations/transformers/ds_llama2.py b/deepspeed/model_implementations/transformers/ds_llama2.py index 7d9eb4113a8a..325bfb4f7e18 100644 --- a/deepspeed/model_implementations/transformers/ds_llama2.py +++ b/deepspeed/model_implementations/transformers/ds_llama2.py @@ -4,11 +4,8 @@ # DeepSpeed Team import torch -from deepspeed import comm as dist from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference -inference_module = None - class DeepSpeedLlama2Inference(DeepSpeedTransformerInference): """Initialize the DeepSpeed OPT Transformer Layer. @@ -27,18 +24,10 @@ def forward(self, *args, **kwargs): input = args[0] input_mask = None - # Allocate memory only on first layer forward - if self.config.layer_id == 0 and self._alloc_workspace: - self.allocate_workspace(self.config.hidden_size, self.config.heads, - input.size()[1], - input.size()[0], DeepSpeedTransformerInference.layer_id, self.config.mp_size, - self.config.bigscience_bloom, - dist.get_rank() if dist.is_initialized() else 0, self.config.max_out_tokens, - self.config.min_out_tokens) - self._alloc_workspace = False - get_present = True + self.allocate_workspace(input.size()) + # We set the prev key/value to None when there is a prompt if input.shape[1] > 1: self.layer_past = None diff --git a/deepspeed/model_implementations/transformers/ds_transformer.py b/deepspeed/model_implementations/transformers/ds_transformer.py index d87d0de997b5..51216ca00ade 100644 --- a/deepspeed/model_implementations/transformers/ds_transformer.py +++ b/deepspeed/model_implementations/transformers/ds_transformer.py @@ -6,19 +6,18 @@ import torch import torch.nn as nn from deepspeed import comm as dist +from deepspeed.ops.transformer.inference.op_binding.layer_norm import LayerNormOp from deepspeed.utils.logging import log_dist from deepspeed.ops.transformer.inference.ds_mlp import DeepSpeedMLP from deepspeed.ops.transformer.inference.ds_attention import DeepSpeedSelfAttention, BloomSelfAttention +from deepspeed.ops.transformer.inference.op_binding.workspace import WorkspaceOp from deepspeed.accelerator import get_accelerator -from deepspeed.ops.op_builder import InferenceBuilder import deepspeed if deepspeed.HAS_TRITON: from deepspeed.ops.transformer.inference.triton.mlp import TritonMLP from deepspeed.ops.transformer.inference.triton.attention import TritonSelfAttention -inference_module = None - class DeepSpeedTransformerInference(nn.Module): """Initialize the DeepSpeed Transformer Layer. @@ -37,6 +36,7 @@ class DeepSpeedTransformerInference(nn.Module): for specific downstream tasks. 
""" layer_id = 0 + workspace = None def __init__(self, config, @@ -52,10 +52,6 @@ def __init__(self, DeepSpeedTransformerInference.layer_id += 1 data_type = torch.half if self.config.dtype == torch.int8 else self.config.dtype - global inference_module - if inference_module is None: - builder = InferenceBuilder() - inference_module = builder.load() if DeepSpeedTransformerInference.layer_id == 1: log_dist(f"DeepSpeed-Inference config: {self.config.__dict__}", [0]) @@ -87,23 +83,27 @@ def __init__(self, requires_grad=False) self.norm_b = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type, device=device), requires_grad=False) - self.layer_past = None - try: - if config.dtype == torch.float32: - self.allocate_workspace = inference_module.allocate_workspace_fp32 - elif config.dtype == torch.bfloat16: - self.allocate_workspace = inference_module.allocate_workspace_bf16 - else: - self.allocate_workspace = inference_module.allocate_workspace_fp32 - self._alloc_workspace = True - except AttributeError: - self.allocate_workspace = None - self._alloc_workspace = False + + self.layer_norm = LayerNormOp() + DeepSpeedTransformerInference.workspace = WorkspaceOp(self.config) + self._should_allocate_workspace = True + self.allocate_workspace_func = self.workspace.allocate_workspace + + def allocate_workspace(self, size): + # Allocate memory only on first layer forward + if self.config.layer_id == 0 and self._should_allocate_workspace: + self.allocate_workspace_func(self.config.hidden_size, self.config.heads, size[1], size[0], + DeepSpeedTransformerInference.layer_id, self.config.mp_size, + self.config.bigscience_bloom, + dist.get_rank() if dist.is_initialized() else 0, self.config.max_out_tokens, + self.config.min_out_tokens) + self._should_allocate_workspace = False @classmethod def reset_cache(cls): - if inference_module is not None: - inference_module.reset_cache() + if cls.workspace is None: + cls.workspace = WorkspaceOp() + cls.workspace.reset_cache() def forward( self, @@ -136,23 +136,12 @@ def forward( input_mask = (input_mask if attn_mask is None else attn_mask) if attention_mask is None else attention_mask - # Allocate memory only on first layer forward - if self.config.layer_id == 0 and self._alloc_workspace: - self.allocate_workspace(self.config.hidden_size, self.config.heads, - input.size()[1], - input.size()[0], DeepSpeedTransformerInference.layer_id, self.config.mp_size, - self.config.bigscience_bloom, - dist.get_rank() if dist.is_initialized() else 0, self.config.max_out_tokens, - self.config.min_out_tokens) - self._alloc_workspace = False + self.allocate_workspace(input.size()) get_present = (get_present or get_key_value or use_cache) input_mask = input_mask if attention_mask is None else attention_mask - # We set the prev key/value to None when there is a prompt - if input.shape[1] > 1: - self.layer_past = None - layer_past = layer_past if layer_past is not None else self.layer_past + layer_past = past_key_value if past_key_value is not None else layer_past head_mask = layer_head_mask if layer_head_mask is not None else head_mask attn_mask = None @@ -178,14 +167,14 @@ def forward( output_attentions, self.norm_w, self.norm_b, - alibi) + alibi, + **kwargs) presents = (key, value) - self.layer_past = presents if layer_past is None else None output = self.mlp(attention_output, input, inp_norm, self.attention.attn_ob) if not self.config.pre_layer_norm: - output = inference_module.layer_norm(output, self.norm_w, self.norm_b, self.config.epsilon) + output = self.layer_norm(output, 
self.norm_w, self.norm_b, self.config.epsilon) output = output.to(input_type) if get_present: diff --git a/deepspeed/module_inject/auto_tp.py b/deepspeed/module_inject/auto_tp.py index bf9c2d74c635..2597e18fe926 100644 --- a/deepspeed/module_inject/auto_tp.py +++ b/deepspeed/module_inject/auto_tp.py @@ -133,7 +133,7 @@ def is_load_module(module): load_layers = [nn.Linear, nn.Embedding, nn.LayerNorm] load_layer_names = [ "LPLayerNorm", "SharedEmbedding", "OPTLearnedPositionalEmbedding", "LlamaRMSNorm", "FalconLinear", - "MistralRMSNorm", "T5LayerNorm" + "MistralRMSNorm", "T5LayerNorm", "MixtralRMSNorm" ] return module.__class__ in load_layers or module._get_name() in load_layer_names @@ -303,6 +303,9 @@ def tp_parser(model): elif 'self_attention.dense' in layer and 'falcon' in str( type(module)): # this is a hack to get the right linear layer for this model! gem_list = gem_list + [layer] + # Mixtral-7x8b used w2*act(w1*w3) linear. need to replace w2 to linearallreduce. + elif 'w2' in layer and 'mixtral' in str(type(module)): + gem_list = gem_list + [layer] layer_list = [] if gem_list != []: diff --git a/deepspeed/module_inject/containers/base.py b/deepspeed/module_inject/containers/base.py index 83e109167ffe..ab6286325eed 100644 --- a/deepspeed/module_inject/containers/base.py +++ b/deepspeed/module_inject/containers/base.py @@ -203,6 +203,12 @@ def set_mlp(self, _h4h_w, _h4h_b, _4hh_w, _4hh_b): self._4hh_b = _4hh_b def set_layernorm(self, attn_nw, attn_nb, input_nw, input_nb): + #TODO SW-164572: remove below mark_step WA once SW-164573 is resolved. + if get_accelerator().device_name() == 'hpu': + import habana_frameworks.torch.hpu as thpu + if thpu.is_initialized(): + import habana_frameworks.torch.core as htcore + htcore.mark_step() self.attn_nw = attn_nw self.attn_nb = attn_nb self.input_nw = input_nw diff --git a/deepspeed/module_inject/containers/bloom.py b/deepspeed/module_inject/containers/bloom.py index 05f30eec8d85..c103b17e1559 100644 --- a/deepspeed/module_inject/containers/bloom.py +++ b/deepspeed/module_inject/containers/bloom.py @@ -23,6 +23,7 @@ def __init__(self, **kwargs): # All model specific things should be defined here instead of the base class. self.bigscience_bloom = True + self.triangular_masking = False def create_module(self, config=None): _config = config if config is not None else self.ds_model_config diff --git a/deepspeed/module_inject/load_checkpoint.py b/deepspeed/module_inject/load_checkpoint.py index 4d01fdc69869..0244b3c8d235 100644 --- a/deepspeed/module_inject/load_checkpoint.py +++ b/deepspeed/module_inject/load_checkpoint.py @@ -17,6 +17,7 @@ import gc from deepspeed.accelerator import get_accelerator import re +from os import getenv def load_model_with_checkpoint(r_module, @@ -28,6 +29,8 @@ def load_model_with_checkpoint(r_module, rank=0, container=None): error_msgs = [] + # TODO SW-170491: remove the below WA once SW-170491 is resolved. + llama_large_seq_perf_workaround = getenv("DEEPSPEED_LLAMA_LARGE_SEQ_PERF_WORKAROUND", "0").lower() in ["1", "true"] def prefix_check(): # if keys start with 'model.' or 'transformer.', don't skip level 0 prefix @@ -40,7 +43,11 @@ def prefix_check(): return False return True - skip_level_0_prefix = prefix_check() and container.policy.use_load_prefix + # TODO SW-170491: remove the below WA once SW-170491 is resolved. 
+ if llama_large_seq_perf_workaround and container is None: + skip_level_0_prefix = prefix_check() + else: + skip_level_0_prefix = prefix_check() and container.policy.use_load_prefix def transpose(data): with torch.no_grad(): @@ -224,6 +231,11 @@ def load_parameters(module, prefix): RMSNorm: load } + # TODO SW-170491: remove the below WA once SW-170491 is resolved. + if llama_large_seq_perf_workaround: + print(f"Removed RMSNorm from layer_policies") + del layer_policies[RMSNorm] + all_ds_ids = {} def load_module_recursive(module, prefix='', level=0): @@ -248,17 +260,26 @@ def load_module_recursive(module, prefix='', level=0): if child.__class__ is nn.LayerNorm: child = Normalize(dim=ds_shape[-1], dtype=child.weight.dtype, eps=child.eps) setattr(module, name, child) - elif child.__class__ in [nn.Linear, ColumnParallelLinear, RowParallelLinear]: + elif (not llama_large_seq_perf_workaround + and child.__class__ in [nn.Linear, ColumnParallelLinear, RowParallelLinear]): + child = LinearLayer(weight_shape=child.weight.shape, bias=child.bias) + setattr(module, name, child) + # TODO SW-170491: remove the below WA once SW-170491 is resolved. + elif llama_large_seq_perf_workaround and child.__class__ is nn.Linear: child = LinearLayer(weight_shape=child.weight.shape, dtype=child.weight.dtype, bias=child.bias) setattr(module, name, child) elif child.__class__ is OPTLearnedPositionalEmbedding: child = OPTEmbedding(weight_shape=ds_shape) setattr(module, name, child) - elif child.__class__ in [LlamaRMSNorm, RMSNorm]: + elif not llama_large_seq_perf_workaround and child.__class__ in [LlamaRMSNorm, RMSNorm]: child = RMSNormalize(dim=ds_shape[-1], dtype=child.weight.dtype, eps=child.eps if hasattr(child, 'eps') else child.variance_epsilon) setattr(module, name, child) + # TODO SW-170491: remove the below WA once SW-170491 is resolved. + elif llama_large_seq_perf_workaround and child.__class__ is LlamaRMSNorm: + child = RMSNormalize(dim=ds_shape[-1], dtype=child.weight.dtype, eps=child.variance_epsilon) + setattr(module, name, child) else: ds_id = None if hasattr(child.weight, 'ds_id'): diff --git a/deepspeed/module_inject/policy.py b/deepspeed/module_inject/policy.py index 41df2b85dc0c..d9cb8c2d9eb5 100644 --- a/deepspeed/module_inject/policy.py +++ b/deepspeed/module_inject/policy.py @@ -27,7 +27,7 @@ class DSPolicy(ABC): _orig_layer_class = None def __init__(self): - self.cuda_graph_supported = False + self.cuda_graph_supported = False if get_accelerator().device_name() != 'hpu' else True @abstractmethod def attention(self): @@ -62,7 +62,7 @@ def __init__( # Type of normalization to perform norm_type=NormType.LayerNorm): super().__init__() - self.cuda_graph_supported = False + self.cuda_graph_supported = False if get_accelerator().device_name() != 'hpu' else True self.inference = inference self.linear_layer = linear_layer self.scale_attention = scale_attention diff --git a/deepspeed/module_inject/replace_module.py b/deepspeed/module_inject/replace_module.py index e1703562d180..ccf096707707 100644 --- a/deepspeed/module_inject/replace_module.py +++ b/deepspeed/module_inject/replace_module.py @@ -202,7 +202,9 @@ def replace_transformer_layer(orig_layer_impl, model, checkpoint_dict, config, m micro_batch_size = -1 seed = -1 local_rank = -1 - + # TODO SW-170491: remove the below WA once SW-170491 is resolved. 
+ llama_large_seq_perf_workaround = os.getenv("DEEPSPEED_LLAMA_LARGE_SEQ_PERF_WORKAROUND", + "0").lower() in ["1", "true"] mp_replace = ReplaceWithTensorSlicing(mp_group=config.tensor_parallel.tp_group, mp_size=config.tensor_parallel.tp_size) #, out_dim=0, in_dim=1) @@ -336,7 +338,8 @@ def set_lm_head(module): module = replace_wo_policy(module, ("embed_out", ), 0, "embed_out") return module - if checkpoint_dict is not None and not config.replace_with_kernel_inject: + # TODO SW-170491: remove the below WA once SW-170491 is resolved. + if not llama_large_seq_perf_workaround and checkpoint_dict is not None and not config.replace_with_kernel_inject: # AutoTP shard loading checkpoint = checkpoint_dict["checkpoints"] pbar = tqdm.tqdm(total=len(checkpoint), desc=f"Loading {len(checkpoint)} checkpoint shards") @@ -359,9 +362,11 @@ def set_lm_head(module): quantizer = GroupQuantizer(q_int8=quantize) world_size = dist.get_world_size() if dist.is_initialized() else 1 rank = dist.get_rank() if dist.is_initialized() else 0 - if checkpoint_dict is not None and config.replace_with_kernel_inject: - assert container_g.ckpt_load_enabled, \ - f"Meta Tensor checkpoint loading not supported in {container_g.__class__.__name__} container" + # TODO SW-170491: remove the below WA once SW-170491 is resolved. + if checkpoint_dict is not None and (config.replace_with_kernel_inject or llama_large_seq_perf_workaround): + if not llama_large_seq_perf_workaround or get_accelerator().device_name() != "hpu": + assert container_g.ckpt_load_enabled, \ + f"Meta Tensor checkpoint loading not supported in {container_g.__class__.__name__} container" start_time = time.time() checkpoint = checkpoint_dict['checkpoints'] ckpt_list = checkpoint["tp"] if type(checkpoint) is dict else checkpoint diff --git a/deepspeed/module_inject/tp_shard.py b/deepspeed/module_inject/tp_shard.py index 79c19b5f1272..29e3e485a632 100644 --- a/deepspeed/module_inject/tp_shard.py +++ b/deepspeed/module_inject/tp_shard.py @@ -5,6 +5,8 @@ from deepspeed import comm as dist global num_kv_heads +# TODO: SW-184584 remove this WA. +is_old_shard_size = None def set_num_kv_heads(num): @@ -24,12 +26,17 @@ def get_num_kv_heads(): def get_shard_size(total_size, mp_size, name=None, rank=None): global num_kv_heads + # TODO: SW-184584 remove this WA. 
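Both temporary workarounds in this patch are gated by environment variables parsed the same way. A small sketch of that flag-parsing convention, using the variable names from the diff; the env_flag helper itself is illustrative:

import os

def env_flag(name, default="0"):
    # "1" / "true" (any case) turn the workaround on; anything else leaves it off.
    return os.getenv(name, default).lower() in ["1", "true"]

llama_large_seq_perf_workaround = env_flag("DEEPSPEED_LLAMA_LARGE_SEQ_PERF_WORKAROUND")
is_old_shard_size = env_flag("HPU_DS_OLD_SHARD_SIZE", default="1")  # note the different default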
+ global is_old_shard_size + if is_old_shard_size is None: + import os + is_old_shard_size = os.environ.get("HPU_DS_OLD_SHARD_SIZE", "1").lower() in ["true", "1"] last_linear = ["lm_head", "embed_out"] # When we have num_kv_heads defined, uneven division is possible, otherwise enforce near even division if rank == None: rank = dist.get_rank() - if num_kv_heads != None and total_size % num_kv_heads == 0 and "mlp" not in str(name) and str( - name) not in last_linear: + if num_kv_heads != None and (is_old_shard_size or (total_size % num_kv_heads == 0 and "mlp" not in str(name) + and str(name) not in last_linear)): my_slices = (num_kv_heads // mp_size) + (1 if rank < (num_kv_heads % mp_size) else 0) return total_size * my_slices // num_kv_heads else: diff --git a/deepspeed/moe/layer.py b/deepspeed/moe/layer.py index dfa9fcf4f464..ef99469262a0 100644 --- a/deepspeed/moe/layer.py +++ b/deepspeed/moe/layer.py @@ -12,6 +12,7 @@ from deepspeed.utils import groups, log_dist from .experts import Experts from .sharded_moe import MOELayer, TopKGate +from deepspeed.accelerator import get_accelerator class MoE(nn.Module): @@ -61,7 +62,8 @@ def __init__(self, self.expert_group_name = f"ep_size_{self.ep_size}" self.num_experts = num_experts self.num_local_experts = num_experts // self.ep_size - + #TODO SW-179530: remove workaround when issue with lazy is resolved (see SW-179530). + expert.to(get_accelerator().device_name()) log_dist( f'Creating MoE layer with num_experts: {num_experts} | num_local_experts: {self.num_local_experts} | expert_parallel_size: {self.ep_size}', [0]) @@ -71,7 +73,7 @@ def __init__(self, experts = Experts(expert, self.num_local_experts, self.expert_group_name) self.deepspeed_moe = MOELayer(TopKGate(hidden_size, num_experts, k, capacity_factor, eval_capacity_factor, - min_capacity, noisy_gate_policy, drop_tokens, use_rts, + min_capacity, noisy_gate_policy, drop_tokens, use_rts, None, top2_2nd_expert_sampling), experts, self.expert_group_name, diff --git a/deepspeed/moe/mappings.py b/deepspeed/moe/mappings.py index 6c501ea6503a..b8a06274343a 100644 --- a/deepspeed/moe/mappings.py +++ b/deepspeed/moe/mappings.py @@ -23,6 +23,8 @@ import torch import deepspeed +from deepspeed.utils.bwc import (bwc_tensor_model_parallel_world_size, bwc_tensor_model_parallel_rank, + bwc_tensor_model_parallel_group) def _gather_tokens(input_, dim=0): @@ -31,11 +33,11 @@ def _gather_tokens(input_, dim=0): input_ = input_.contiguous() # Size and dimension. - rank = mpu.get_tensor_model_parallel_rank() + rank = bwc_tensor_model_parallel_rank(mpu) - tensor_list = [torch.empty_like(input_) for _ in range(mpu.get_tensor_model_parallel_world_size())] + tensor_list = [torch.empty_like(input_) for _ in range(bwc_tensor_model_parallel_world_size(mpu))] tensor_list[rank] = input_ - deepspeed.comm.all_gather(tensor_list, input_, group=mpu.get_tensor_model_parallel_group()) + deepspeed.comm.all_gather(tensor_list, input_, group=bwc_tensor_model_parallel_group(mpu)) # Note: torch.cat already creates a contiguous tensor. 
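A worked example of the kv-head-aware shard size that get_shard_size computes when uneven division is allowed; the numbers are made up, only the arithmetic mirrors the code above:

def shard_size(total_size, mp_size, num_kv_heads, rank):
    # kv heads are dealt round-robin: the first (num_kv_heads % mp_size) ranks get one extra head.
    my_slices = num_kv_heads // mp_size + (1 if rank < num_kv_heads % mp_size else 0)
    return total_size * my_slices // num_kv_heads

# 8 kv heads, hidden size 4096, tp=3 -> shards of 1536, 1536 and 1024 (3, 3 and 2 heads)
sizes = [shard_size(4096, 3, 8, r) for r in range(3)]
assert sizes == [1536, 1536, 1024] and sum(sizes) == 4096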
output = torch.cat(tensor_list, dim=dim).contiguous() @@ -47,8 +49,8 @@ def _drop_tokens(input_, dim=0): """Divide a tensor among the tensor parallel ranks""" mpu = deepspeed.utils.groups.mpu - total_chunks = mpu.get_tensor_model_parallel_world_size() - this_chunk = mpu.get_tensor_model_parallel_rank() + total_chunks = bwc_tensor_model_parallel_world_size(mpu) + this_chunk = bwc_tensor_model_parallel_rank(mpu) assert input_.shape[ dim] % total_chunks == 0, f"input dimension {dim} ({input_.shape[dim]}) is not divisible by tensor parallel world size ({total_chunks})" chunk_size = input_.shape[dim] // total_chunks @@ -92,7 +94,7 @@ def backward(ctx, input_): def gather_tokens(input_, dim=0): mpu = deepspeed.utils.groups.mpu - if mpu is None or mpu.get_tensor_model_parallel_world_size() == 1: + if mpu is None or bwc_tensor_model_parallel_world_size(mpu) == 1: # no tensor parallelism for non-experts return input_ return _GatherTokens.apply(input_, dim) @@ -100,7 +102,7 @@ def gather_tokens(input_, dim=0): def drop_tokens(input_, dim=0): mpu = deepspeed.utils.groups.mpu - if mpu is None or mpu.get_tensor_model_parallel_world_size() == 1: + if mpu is None or bwc_tensor_model_parallel_world_size(mpu) == 1: # no tensor parallelism for non-experts return input_ return _DropTokens.apply(input_, dim) diff --git a/deepspeed/moe/sharded_moe.py b/deepspeed/moe/sharded_moe.py index d6c023ec11d3..0971af83173a 100644 --- a/deepspeed/moe/sharded_moe.py +++ b/deepspeed/moe/sharded_moe.py @@ -17,7 +17,8 @@ from deepspeed.utils.timer import SynchronizedWallClockTimer from deepspeed.utils import logger -from typing import Callable, Dict, TYPE_CHECKING, Any, Optional, Tuple +from deepspeed.utils.bwc import bwc_tensor_model_parallel_world_size +from typing import Callable, Dict, TYPE_CHECKING, Any, Optional, Tuple, Union import torch from torch import Tensor @@ -184,6 +185,7 @@ def top1gating(logits: Tensor, noisy_gate_policy: Optional[str] = None, drop_tokens: bool = True, use_rts: bool = True, + ep_group: Union[torch.distributed.ProcessGroup, None] = None, use_tutel: bool = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]: """Implements Top1Gating on logits.""" if noisy_gate_policy == 'RSample': @@ -209,11 +211,13 @@ def top1gating(logits: Tensor, # if we don't want to drop any tokens if not drop_tokens: new_capacity = torch.max(exp_counts).to(logits.device) - dist.all_reduce(new_capacity, op=dist.ReduceOp.MAX, group=dist.get_world_group()) + # Communicate across expert processes to pick the maximum capacity. + if ep_group is not None: + dist.all_reduce(new_capacity, op=dist.ReduceOp.MAX, group=ep_group) if groups._get_expert_model_parallel_world_size() == 1: # If the non-expert is tensor-parallel, we need to pad the capacity to 'tp'. # This is since we are going to activate drop_tokens() to drop duplicate tokens. 
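When drop_tokens is disabled, the gate takes the maximum per-expert load (all-reduced over ep_group when one is supplied) and pads it up to a multiple of the tensor-parallel size. A single-process sketch of that padding step, with the communication omitted:

import torch

def pad_capacity_to_tp(exp_counts, tp):
    # max token count over the local experts, rounded up to a multiple of the TP degree
    new_capacity = torch.max(exp_counts)
    return torch.ceil(new_capacity.float() / tp).mul(tp).to(exp_counts.dtype)

print(pad_capacity_to_tp(torch.tensor([5, 9, 7]), 4))  # tensor(12)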
- tp = 1 if groups.mpu is None else groups.mpu.get_tensor_model_parallel_world_size() + tp = 1 if groups.mpu is None else bwc_tensor_model_parallel_world_size(mpu=groups.mpu) new_capacity = torch.ceil(new_capacity / tp).mul(tp).to(new_capacity.dtype) capacity = new_capacity @@ -284,6 +288,7 @@ def top2gating(logits: Tensor, capacity_factor: float, min_capacity: int, drop_tokens: bool = True, + ep_group: Union[torch.distributed.ProcessGroup, None] = None, top2_2nd_expert_sampling: bool = True) -> Tuple[Tensor, Tensor, Tensor, Tensor]: """Implements Top2Gating on logits.""" # everything is in fp32 in this function @@ -326,11 +331,12 @@ def top2gating(logits: Tensor, else: # Do not drop tokens - set capacity according to current expert assignments new_capacity = torch.max(exp_counts) - dist.all_reduce(new_capacity, op=dist.ReduceOp.MAX, group=dist.get_world_group()) + if ep_group is not None: + dist.all_reduce(new_capacity, op=dist.ReduceOp.MAX, group=ep_group) if groups._get_expert_model_parallel_world_size() == 1: # If the non-expert is tensor-parallel, we need to pad the capacity to 'tp'. # This is since we are going to activate drop_tokens() to drop duplicate tokens. - tp = 1 if groups.mpu is None else groups.mpu.get_tensor_model_parallel_world_size() + tp = 1 if groups.mpu is None else bwc_tensor_model_parallel_world_size(mpu=groups.mpu) new_capacity = torch.ceil(new_capacity / tp).mul(tp).to(new_capacity.dtype) capacity = new_capacity @@ -374,7 +380,7 @@ class TopKGate(Module): Args: model_dim (int): size of model embedding dimension - num_experts (ints): + num_experts (int): number of experts in model """ @@ -390,6 +396,7 @@ def __init__(self, noisy_gate_policy: Optional[str] = None, drop_tokens: bool = True, use_rts: bool = True, + ep_group: Union[torch.distributed.ProcessGroup, None] = None, top2_2nd_expert_sampling: bool = True) -> None: super().__init__() @@ -397,6 +404,7 @@ def __init__(self, if k != 1 and k != 2: raise ValueError('Only top-1 and top-2 gatings are supported.') self.wg = torch.nn.Linear(model_dim, num_experts, bias=False) + self.ep_group = ep_group self.k = k self.capacity_factor = capacity_factor self.eval_capacity_factor = eval_capacity_factor @@ -409,6 +417,10 @@ def __init__(self, self.use_rts = use_rts self.top2_2nd_expert_sampling = top2_2nd_expert_sampling + def _set_ep_group(self, ep_group): + assert self.ep_group is None, f'Attempting to override an existing ep_group' + self.ep_group = ep_group + def forward(self, input: torch.Tensor, used_token: torch.Tensor = None, @@ -426,11 +438,11 @@ def forward(self, if self.k == 1: gate_output = top1gating(logits, self.capacity_factor if self.training else self.eval_capacity_factor, self.min_capacity, used_token, self.noisy_gate_policy if self.training else None, - self.drop_tokens, self.use_rts, use_tutel) + self.drop_tokens, self.use_rts, self.ep_group, use_tutel) else: gate_output = top2gating(logits, self.capacity_factor if self.training else self.eval_capacity_factor, - self.min_capacity, self.drop_tokens, self.top2_2nd_expert_sampling) + self.min_capacity, self.drop_tokens, self.ep_group, self.top2_2nd_expert_sampling) if self.wall_clock_breakdown: self.timers(TOPK_GATE_TIMER).stop() @@ -490,6 +502,7 @@ def __init__(self, def _set_ep_group(self, ep_group): self.ep_group = ep_group + self.gate._set_ep_group(ep_group) def forward(self, *input: Tensor, **kwargs: Any) -> Tensor: diff --git a/deepspeed/moe/utils.py b/deepspeed/moe/utils.py index 8e1faffc3541..f52fe2e3442d 100644 --- a/deepspeed/moe/utils.py +++ 
b/deepspeed/moe/utils.py @@ -146,3 +146,7 @@ def split_params_into_different_moe_groups_for_optimizer( param_groups.append(param_group) return param_groups + + +def is_moe_param_group(param_group): + return param_group.get('moe', False) diff --git a/deepspeed/ops/transformer/inference/config.py b/deepspeed/ops/transformer/inference/config.py index d5aff4f541f7..f21bbc82cb52 100644 --- a/deepspeed/ops/transformer/inference/config.py +++ b/deepspeed/ops/transformer/inference/config.py @@ -101,7 +101,6 @@ def __init__(self, self.return_tuple = return_tuple self.mlp_after_attn = mlp_after_attn self.mlp_act_func_type = mlp_act_func_type - self.specialized_mode = False self.training_mp_size = training_mp_size self.bigscience_bloom = bigscience_bloom self.max_out_tokens = max_out_tokens diff --git a/deepspeed/ops/transformer/inference/diffusers_attention.py b/deepspeed/ops/transformer/inference/diffusers_attention.py index 5efc560db75e..c384ee77c03d 100644 --- a/deepspeed/ops/transformer/inference/diffusers_attention.py +++ b/deepspeed/ops/transformer/inference/diffusers_attention.py @@ -10,10 +10,11 @@ from packaging import version as pkg_version from deepspeed.utils.logging import log_dist from deepspeed.accelerator import get_accelerator -from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer.inference.op_binding.workspace import WorkspaceOp +from deepspeed.ops.transformer.inference.op_binding.softmax_context import SoftmaxContextOp +from deepspeed.ops.transformer.inference.op_binding import LinearOp +from deepspeed.ops.transformer.inference.op_binding.pad_transform import PadTransformOp -# Cuda modules will be imported if needed -inference_module = None minus_inf = -10000.0 triton_flash_attn = None @@ -36,7 +37,8 @@ class DeepSpeedDiffusersAttentionFunction(Function): @staticmethod def forward(ctx, input, context, input_mask, config, attn_qkvw, attn_qw, attn_kw, attn_vw, attn_qkvb, num_attention_heads_per_partition, norm_factor, hidden_size_per_partition, attn_ow, attn_ob, - do_out_bias, score_context_func, linear_func, triton_flash_attn_kernel, rope_theta): + do_out_bias, score_context_func, linear_func, pad_transform_func, triton_flash_attn_kernel, + rope_theta): def _transpose_for_context(x): x = x.permute(0, 2, 1, 3) @@ -77,7 +79,7 @@ def selfAttention_fp(input, context, input_mask): query = query.contiguous() key = key.contiguous() value = value.contiguous() - query, key, value = inference_module.pad_transform_fp16(query, key, value, config.heads, do_flash_attn) + query, key, value = pad_transform_func(query, key, value, config.heads, do_flash_attn) attention_scores = (torch.matmul(query, key.transpose(-1, -2)) * scale).softmax(dim=-1) context_layer = _transpose_for_context(torch.matmul(attention_scores, value)) @@ -117,10 +119,6 @@ def __init__( data_type = self.config.dtype data_type_fp = torch.half if self.config.dtype == torch.int8 else self.config.dtype - global inference_module - if inference_module is None: - builder = InferenceBuilder() - inference_module = builder.load() if DeepSpeedDiffusersAttention.layer_id == 1: log_dist(f"DeepSpeed-Attention config: {self.config.__dict__}", [0]) @@ -171,26 +169,24 @@ def __init__( self.norm_factor *= math.sqrt(self.config.layer_id + 1) # https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/gpt2/modeling_gpt2.py#L191 - if self.config.dtype in [torch.float16, torch.int8]: - self.score_context_func = inference_module.softmax_context_fp16 - self.linear_func = 
inference_module.linear_layer_fp16 - self.allocate_workspace = inference_module.allocate_workspace_fp16 - else: - self.score_context_func = inference_module.softmax_context_fp32 - self.linear_func = inference_module.linear_layer_fp32 - self.allocate_workspace = inference_module.allocate_workspace_fp32 + self.allocate_workspace_func = WorkspaceOp(self.config).allocate_workspace + self.score_context_func = SoftmaxContextOp(self.config) + self.linear_func = LinearOp(self.config) + self.pad_transform_func = PadTransformOp(self.config) - def forward(self, input, context=None, input_mask=None): + def allocate_workspace(self, size): + # Allocate memory only on first layer forward if self.config.layer_id == 0: - self.allocate_workspace(self.config.hidden_size, self.config.heads, - input.size()[1], - input.size()[0], DeepSpeedDiffusersAttention.layer_id, self.config.mp_size, False, - 0, self.config.max_out_tokens, self.config.min_out_tokens) - output = DeepSpeedDiffusersAttentionFunction.apply(input, context, input_mask, self.config, self.attn_qkvw, - self.attn_qw, self.attn_kw, self.attn_vw, self.attn_qkvb, - self.num_attention_heads_per_partition, self.norm_factor, - self.hidden_size_per_partition, self.attn_ow, self.attn_ob, - self.do_out_bias, self.score_context_func, self.linear_func, - self.triton_flash_attn_kernel, self.config.rope_theta) + self.allocate_workspace_func(self.config.hidden_size, self.config.heads, size[1], size[0], + DeepSpeedDiffusersAttention.layer_id, self.config.mp_size, False, 0, + self.config.max_out_tokens, self.config.min_out_tokens) + + def forward(self, input, context=None, input_mask=None): + self.allocate_workspace(input.size()) + output = DeepSpeedDiffusersAttentionFunction.apply( + input, context, input_mask, self.config, self.attn_qkvw, self.attn_qw, self.attn_kw, self.attn_vw, + self.attn_qkvb, self.num_attention_heads_per_partition, self.norm_factor, self.hidden_size_per_partition, + self.attn_ow, self.attn_ob, self.do_out_bias, self.score_context_func, self.linear_func, + self.pad_transform_func, self.triton_flash_attn_kernel, self.config.rope_theta) return output diff --git a/deepspeed/ops/transformer/inference/diffusers_transformer_block.py b/deepspeed/ops/transformer/inference/diffusers_transformer_block.py index b0156f905a06..d01638f36e40 100644 --- a/deepspeed/ops/transformer/inference/diffusers_transformer_block.py +++ b/deepspeed/ops/transformer/inference/diffusers_transformer_block.py @@ -10,26 +10,9 @@ from .diffusers_attention import DeepSpeedDiffusersAttention from .bias_add import nhwc_bias_add from .diffusers_2d_transformer import Diffusers2DTransformerConfig -from deepspeed.ops.op_builder import InferenceBuilder, SpatialInferenceBuilder from deepspeed.utils.types import ActivationFuncType - -# Ops will be loaded on demand -transformer_cuda_module = None -spatial_cuda_module = None - - -def load_transformer_module(): - global transformer_cuda_module - if transformer_cuda_module is None: - transformer_cuda_module = InferenceBuilder().load() - return transformer_cuda_module - - -def load_spatial_module(): - global spatial_cuda_module - if spatial_cuda_module is None: - spatial_cuda_module = SpatialInferenceBuilder().load() - return spatial_cuda_module +from .op_binding.gated_activation import GatedActivationOp +from .op_binding.layer_norm import LayerNormOp class DeepSpeedDiffusersTransformerBlock(nn.Module): @@ -76,8 +59,8 @@ def __init__(self, equivalent_module: nn.Module, config: Diffusers2DTransformerC else: self.attn_2_bias = 
nn.Paramaeter(torch.zeros_like(self.norm3_g), requires_grad=False) - self.transformer_cuda_module = load_transformer_module() - load_spatial_module() + self.gated_activation = GatedActivationOp() + self.layer_norm = LayerNormOp() def forward(self, hidden_states, context=None, timestep=None, **kwargs): # In v0.12.0 of diffuser, several new kwargs were added. Capturing @@ -88,17 +71,17 @@ def forward(self, hidden_states, context=None, timestep=None, **kwargs): if "encoder_hidden_states" in kwargs and kwargs["encoder_hidden_states"] is not None: context = kwargs["encoder_hidden_states"] - out_norm_1 = self.transformer_cuda_module.layer_norm(hidden_states, self.norm1_g, self.norm1_b, self.norm1_eps) + out_norm_1 = self.layer_norm(hidden_states, self.norm1_g, self.norm1_b, self.norm1_eps) out_attn_1 = self.attn_1(out_norm_1) - out_norm_2, out_attn_1 = self.transformer_cuda_module.layer_norm_residual_store_pre_ln_res( + out_norm_2, out_attn_1 = self.layer_norm.layer_norm_residual_store_pre_ln_res( out_attn_1, self.attn_1_bias, hidden_states, self.norm2_g, self.norm2_b, self.norm2_eps) out_attn_2 = self.attn_2(out_norm_2, context=context) - out_norm_3, out_attn_2 = self.transformer_cuda_module.layer_norm_residual_store_pre_ln_res( + out_norm_3, out_attn_2 = self.layer_norm.layer_norm_residual_store_pre_ln_res( out_attn_2, self.attn_2_bias, out_attn_1, self.norm3_g, self.norm3_b, self.norm3_eps) out_ff1 = nn.functional.linear(out_norm_3, self.ff1_w) - out_geglu = self.transformer_cuda_module.gated_activation(out_ff1, self.ff1_b, ActivationFuncType.GATED_GELU) + out_geglu = self.gated_activation(out_ff1, self.ff1_b, ActivationFuncType.GATED_GELU) out_ff2 = nn.functional.linear(out_geglu, self.ff2_w) return nhwc_bias_add(out_ff2, self.ff2_b, other=out_attn_2) diff --git a/deepspeed/ops/transformer/inference/ds_attention.py b/deepspeed/ops/transformer/inference/ds_attention.py index eb6ce2f75c69..e04369f0efd1 100644 --- a/deepspeed/ops/transformer/inference/ds_attention.py +++ b/deepspeed/ops/transformer/inference/ds_attention.py @@ -89,7 +89,7 @@ def __init__(self, config, mp_group=None, q_scales=None, q_groups=1, merge_count torch.empty(self.hidden_size_per_partition * 3, dtype=data_type_fp, device=device) ] - def compute_attention(self, qkv_out, input_mask, layer_past, alibi): + def compute_attention(self, qkv_out, input_mask, layer_past, alibi, is_prompt, token_idx, position_ids): if isinstance(qkv_out, list) or isinstance(qkv_out, tuple): qkv_out = qkv_out[0] @@ -108,7 +108,10 @@ def compute_attention(self, qkv_out, input_mask, layer_past, alibi): no_masking=no_masking, layer_id=self.config.layer_id, num_layers=DeepSpeedSelfAttention.num_layers, - alibi=alibi) + alibi=alibi, + is_prompt=is_prompt, + token_idx=token_idx, + position_ids=position_ids) context_layer, key_layer, value_layer = attn_key_value return context_layer, key_layer, value_layer @@ -136,7 +139,8 @@ def forward(self, output_attentions=False, norm_w=None, norm_b=None, - alibi=None): + alibi=None, + **kwargs): if self.attn_qkvw is None: self._attn_qkvw, self._attn_qkvb = self._merge_qkv() else: @@ -157,10 +161,17 @@ def forward(self, gamma=norm_w, beta=norm_b) + is_prompt = kwargs.get("first_token", qkv_out[0].shape[1] > 1) + token_idx = kwargs.get("token_idx", None) + position_ids = kwargs.get("position_ids", None) + context_layer, key_layer, value_layer = self.compute_attention(qkv_out=qkv_out, input_mask=input_mask, layer_past=layer_past, - alibi=alibi) + alibi=alibi, + is_prompt=is_prompt, + token_idx=token_idx, + 
position_ids=position_ids) output = self.vector_matmul_func(input=context_layer, weight=self.attn_ow) inp_norm = qkv_out[-1] @@ -210,7 +221,7 @@ def _split_tensor_along_last_dim(self, tensor, num_partitions, contiguous_split_ return tensor_list - def compute_attention(self, qkv_out, input_mask, layer_past, alibi): + def compute_attention(self, qkv_out, input_mask, layer_past, alibi, is_prompt, token_idx, position_ids): if isinstance(qkv_out, list) or isinstance(qkv_out, tuple): qkv_out = qkv_out[0] @@ -246,8 +257,10 @@ def compute_attention(self, qkv_out, input_mask, layer_past, alibi): matmul_result = torch.matmul(query_layer, key_layer) # change view to [batch_size, num_heads, q_length, k_length] attention_scores = matmul_result.view(output_size[0], output_size[1], output_size[2], -1) - - offset = dist.get_rank() * self.num_attention_heads_per_partition if dist.is_initialized() else 0 + if self.config.mp_size > 1 and dist.is_initialized(): + offset = dist.get_rank() * self.num_attention_heads_per_partition + else: + offset = 0 target_dtype = torch.float16 if self.config.dtype == torch.int8 else self.config.dtype # When using the hybrid engine with BLOOM, input_mask needs to be converted from torch.bool -> torch.int64 @@ -255,7 +268,7 @@ def compute_attention(self, qkv_out, input_mask, layer_past, alibi): input_mask = input_mask.long() attention_probs = self.softmax_func(attn_scores=attention_scores, - attn_mask=((1 - input_mask).to(target_dtype) * minus_inf), + attn_mask=input_mask.to(target_dtype) * minus_inf, alibi=alibi, triangular=(self.config.triangular_masking and (attention_scores.shape[-2] > 1)), diff --git a/deepspeed/ops/transformer/inference/moe_inference.py b/deepspeed/ops/transformer/inference/moe_inference.py index 90bfcae81bf2..da3981c13714 100644 --- a/deepspeed/ops/transformer/inference/moe_inference.py +++ b/deepspeed/ops/transformer/inference/moe_inference.py @@ -7,16 +7,16 @@ import math import torch from torch.autograd import Function -# accelerator modules will be imported if needed -inference_module = None -specialized_mode = None import torch.nn as nn from .ds_attention import DeepSpeedSelfAttention from .config import DeepSpeedInferenceConfig +from .op_binding import SoftmaxOp, VectorMatMulOp, GELUGemmOp +from .op_binding.bias_residual import BiasResidualOp +from .op_binding.einsum_sec_sm_ecm import EinsumSecSmEcmOp +from .op_binding.layer_norm import LayerNormOp from ....moe.sharded_moe import TopKGate from deepspeed import comm as dist -from deepspeed.accelerator import get_accelerator -from deepspeed.ops.op_builder import InferenceBuilder +from .op_binding.moe_res_matmul import MoEResMatmulOp class DeepSpeedMoEInferenceConfig(DeepSpeedInferenceConfig): @@ -110,16 +110,13 @@ class DeepSpeedMLPFunction(Function): @staticmethod def forward(ctx, input, inter_w, inter_b, config, output_b, output_w, q_scales, q_groups, merge_count, mp_group, - async_op): + async_op, gelu_gemm_func, vector_matmul_func): if config.q_int8: - intermediate = inference_module.fused_gemm_gelu_int8(input, inter_w, inter_b, config.epsilon, q_scales[2], - (q_groups * (2**merge_count)), config.pre_layer_norm) - output = inference_module.vector_matmul_int8(intermediate, output_w, q_scales[3], q_groups, (merge_count)) + intermediate = gelu_gemm_func(input, inter_w, inter_b, config.epsilon, q_scales[2], + (q_groups * (2**merge_count)), config.pre_layer_norm) + output = vector_matmul_func(intermediate, output_w, q_scales[3], q_groups, (merge_count)) else: - mlp_gemm_func = 
inference_module.fused_gemm_gelu_fp16 if config.fp16 else \ - inference_module.fused_gemm_gelu_fp32 - - output = mlp_gemm_func(input, inter_w, inter_b, output_w, config.epsilon, config.pre_layer_norm, async_op) + output = gelu_gemm_func(input, inter_w, inter_b, output_w, config.epsilon, config.pre_layer_norm, async_op) if mp_group is not None and dist.get_world_size(group=mp_group) > 1: dist.all_reduce(output, group=mp_group, async_op=async_op) @@ -150,10 +147,13 @@ def __init__(self, config, q_scales=None, q_groups=1, merge_count=1, mlp_extra_g self.q_groups = q_groups * 2 if mlp_extra_grouping else q_groups self.merge_count = int(math.log2(merge_count)) self.mp_group = mp_group + self.gelu_gemm_func = GELUGemmOp(self.config) + self.vector_matmul_func = VectorMatMulOp(self.config) def forward(self, input, async_op=False): return DeepSpeedMLPFunction.apply(input, self.inter_w, self.inter_b, self.config, self.output_b, self.output_w, - self.q_scales, self.q_groups, self.merge_count, self.mp_group, async_op) + self.q_scales, self.q_groups, self.merge_count, self.mp_group, async_op, + self.gelu_gemm_func, self.vector_matmul_func) class DeepSpeedMoEInference(nn.Module): @@ -187,18 +187,7 @@ def __init__(self, self.config = config self.config.layer_id = DeepSpeedMoEInference.layer_id - global inference_module - global specialized_mode - if inference_module is None: - specialized_mode = False - # InferenceSpecializedBuilder is not among DeepSpeed provided builder yet, so we infer by builder name string - builder = get_accelerator().create_op_builder("InferenceSpecializedBuilder") - if builder is not None and builder.is_compatible(): - inference_module = builder.load() - specialized_mode = True - else: - inference_module = InferenceBuilder().load() - self.config.specialized_mode = specialized_mode + assert self.config.dtype != torch.bfloat16, "DeepSpeed MoE Transformer Inference not yet tested for bfloat support" DeepSpeedMoEInference.layer_id += 1 @@ -213,10 +202,8 @@ def __init__(self, self.res_mlp = DeepSpeedMoEMLP(config, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping, mp_group) self.res_coef = nn.Parameter(torch.Tensor(self.config.hidden_size, 2)) - self.coef_func = inference_module.softmax_fp16 if self.config.dtype in [torch.float16, torch.int8] else \ - inference_module.softmax_fp32 - self.vector_matmul_func = inference_module.vector_matmul_fp16 if self.config.dtype == torch.float16 else \ - inference_module.vector_matmul_fp32 + self.coef_func = SoftmaxOp(self.config) + self.vector_matmul_func = VectorMatMulOp(self.config) config.mp_size = 1 self.mlp = nn.ModuleList( @@ -226,7 +213,7 @@ def __init__(self, self.moe_gate = TopKGate(self.config.hidden_size, self.config.global_experts, self.config.k, self.config.capacity_factor, self.config.eval_capacity_factor, self.config.min_capacity, self.config.noisy_gate_policy, self.config.drop_tokens, - self.config.use_rts) + self.config.use_rts, self.ep_group) self.ep_group = ep_group self.mp_group = mp_group @@ -234,12 +221,10 @@ def __init__(self, print("DeepSpeed MoE Transformer Inference config is ", self.config.__dict__) - self.bias_residual_func = inference_module.bias_residual_fp16 if self.config.dtype in [torch.float16, torch.int8] else \ - inference_module.bias_residual_fp32 - self.ds_layernorm = inference_module.layer_norm_fp16 if self.config.dtype in [torch.float16, torch.int8] else \ - inference_module.layer_norm_fp32 - self.einsum_sec_sm_ecm = inference_module.einsum_sec_sm_ecm_fp16 if self.config.dtype in 
[torch.float16, torch.int8] else \ - inference_module.einsum_sec_sm_ecm_fp32 + self.bias_residual_func = BiasResidualOp(self.config) + self.ds_layernorm = LayerNormOp(self.config) + self.einsum_sec_sm_ecm = EinsumSecSmEcmOp(self.config) + self.moe_res_matmul = MoEResMatmulOp(self.config) def res_coef_func(self, inp, async_op): inp = self.vector_matmul_func(inp, self.res_coef, async_op) @@ -346,7 +331,7 @@ def forward(self, dim=0)[dist.get_rank(group=self.expert_mp_group)] if self.config.mlp_type == 'residual': - inference_module.moe_res_matmul(res_mlp_out, res_coef_out, output) + self.moe_res_matmul(res_mlp_out, res_coef_out, output) output = self.bias_residual_func(output, residual_add, torch.empty(1)) diff --git a/deepspeed/ops/transformer/inference/op_binding/bias_add.py b/deepspeed/ops/transformer/inference/op_binding/bias_add.py new file mode 100644 index 000000000000..d2ae38f546eb --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/bias_add.py @@ -0,0 +1,31 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class BiasAddOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(BiasAddOp, self).__init__(config) + + try: + if self.config.dtype == torch.float16: + self.bias_add_func = self.inference_module.bias_add_fp16 + elif self.config.dtype == torch.bfloat16: + self.bias_add_func = self.inference_module.bias_add_bf16 + else: + self.bias_add_func = self.inference_module.bias_add_fp32 + except AttributeError: + self.bias_add_func = self.bias_add_fallback + + @classmethod + def bias_add_fallback(cls, input, bias): + return torch.add(input, bias) + + def forward(self, activation: torch.Tensor, bias: torch.Tensor): + return self.bias_add_func(activation, bias) diff --git a/deepspeed/ops/transformer/inference/op_binding/bias_gelu.py b/deepspeed/ops/transformer/inference/op_binding/bias_gelu.py new file mode 100644 index 000000000000..f0fee0b0d06e --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/bias_gelu.py @@ -0,0 +1,33 @@ +# Copyright (c) Microsoft Corporation. 
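All of the new op_binding classes (BiasAddOp above, and the BiasGelu/BiasRelu/BiasResidual ops that follow) share the same shape: try to resolve a dtype-specific kernel from the loaded inference module, and fall back to eager PyTorch on AttributeError. A self-contained restatement of that pattern; the class below and its constructor arguments are illustrative, the real ops derive from BaseOp and read the dtype from DeepSpeedInferenceConfig:

import torch

class _BiasAddLike:

    def __init__(self, inference_module=None, dtype=torch.float32):
        try:
            suffix = "fp16" if dtype == torch.float16 else "fp32"
            self.func = getattr(inference_module, f"bias_add_{suffix}")
        except AttributeError:
            self.func = self._fallback  # kernel not available: use eager PyTorch

    @staticmethod
    def _fallback(activation, bias):
        return torch.add(activation, bias)

    def __call__(self, activation, bias):
        return self.func(activation, bias)

op = _BiasAddLike()  # no inference module loaded -> fallback path
y = op(torch.randn(2, 4), torch.randn(4))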
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import torch.nn.functional as F +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class BiasGeluOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(BiasGeluOp, self).__init__(config) + + try: + if self.config.dtype == torch.float16: + self.bias_gelu_func = self.inference_module.bias_gelu_fp16 + elif self.config.dtype == torch.bfloat16: + self.bias_gelu_func = self.inference_module.bias_gelu_bf16 + else: + self.bias_gelu_func = self.inference_module.bias_gelu_fp32 + except AttributeError: + self.bias_gelu_func = self.bias_gelu_fallback + + @classmethod + def bias_gelu_fallback(cls, activations, bias): + # Expected behavior is that of casting to float32 internally and using the tanh approximation + return F.gelu(activations.to(torch.float32) + bias.to(torch.float32), approximate='tanh').to(activations.dtype) + + def forward(self, activation: torch.Tensor, bias: torch.Tensor): + return self.bias_gelu_func(activation, bias) diff --git a/deepspeed/ops/transformer/inference/op_binding/bias_relu.py b/deepspeed/ops/transformer/inference/op_binding/bias_relu.py new file mode 100644 index 000000000000..ccfade1d9524 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/bias_relu.py @@ -0,0 +1,33 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import torch.nn.functional as F +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class BiasReluOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(BiasReluOp, self).__init__(config) + + try: + if self.config.dtype == torch.float16: + self.bias_relu_func = self.inference_module.bias_relu_fp16 + elif self.config.dtype == torch.bfloat16: + self.bias_relu_func = self.inference_module.bias_relu_bf16 + else: + self.bias_relu_func = self.inference_module.bias_relu_fp32 + except AttributeError: + self.bias_relu_func = self.bias_relu_fallback + + @classmethod + def bias_relu_fallback(cls, activations, bias): + # Expected behavior is that of casting to float32 internally + return F.relu(activations.to(torch.float32) + bias.to(torch.float32)).to(activations.dtype) + + def forward(self, activation: torch.Tensor, bias: torch.Tensor): + return self.bias_relu_func(activation, bias) diff --git a/deepspeed/ops/transformer/inference/op_binding/bias_residual.py b/deepspeed/ops/transformer/inference/op_binding/bias_residual.py new file mode 100644 index 000000000000..ecad50e10ffe --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/bias_residual.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class BiasResidualOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(BiasResidualOp, self).__init__(config) + + try: + if self.config.dtype in [torch.float16, torch.int8]: + self.bias_residual_func = self.inference_module.bias_residual_fp16 + else: + self.bias_residual_func = self.inference_module.bias_residual_fp32 + except AttributeError: + self.bias_residual_func = self.bias_residual_fallback + + @classmethod + def bias_residual_fallback(cls, output, residual, bias): + raise NotImplementedError("bias residual fallback isn't implemented") + + def forward(self, output, residual, bias): + return self.bias_residual_func(output, residual, bias) diff --git a/deepspeed/ops/transformer/inference/op_binding/einsum_sec_sm_ecm.py b/deepspeed/ops/transformer/inference/op_binding/einsum_sec_sm_ecm.py new file mode 100644 index 000000000000..f34b10f786d1 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/einsum_sec_sm_ecm.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class EinsumSecSmEcmOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(EinsumSecSmEcmOp, self).__init__(config) + + try: + if self.config.dtype in [torch.float16, torch.int8]: + self.einsum_sec_sm_ecm_func = self.inference_module.einsum_sec_sm_ecm_fp16 + else: + self.einsum_sec_sm_ecm_func = self.inference_module.einsum_sec_sm_ecm_fp32 + except AttributeError: + self.einsum_sec_sm_ecm_func = self.einsum_sec_sm_ecm_fallback + + @classmethod + def einsum_sec_sm_ecm_fallback(cls, Q, W): + raise NotImplementedError("einsum sec sm ecm fallback isn't implemented") + + def forward(self, Q, W): + return self.einsum_sec_sm_ecm_func(Q, W) diff --git a/deepspeed/ops/transformer/inference/op_binding/gated_activation.py b/deepspeed/ops/transformer/inference/op_binding/gated_activation.py new file mode 100644 index 000000000000..d28d818ce4b3 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/gated_activation.py @@ -0,0 +1,40 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import torch.nn.functional as F +from deepspeed.utils.types import ActivationFuncType +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class GatedActivationOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(GatedActivationOp, self).__init__(config) + try: + self.gated_activation_func = self.inference_module.gated_activation + except AttributeError: + self.gated_activation_func = self.gated_activation_fallback + + @classmethod + def gated_activation_fallback(cls, activation, bias, activation_func_type): + # Expected behavior is that of casting to float32 internally + # Explicitly using the default GeLU + activation_func = None + activations = activation + bias.reshape(1, 1, -1) + hidden_states, gate = activations.chunk(2, dim=-1) + + if activation_func_type == ActivationFuncType.GATED_SILU: + activation_func = F.silu + elif activation_func_type == ActivationFuncType.GATED_GELU: + activation_func = F.gelu + + return hidden_states * activation_func(gate.to(torch.float32)).to(activations.dtype) + + def forward(self, activation: torch.Tensor, bias: torch.Tensor, activation_func_type: ActivationFuncType): + return self.gated_activation_func(activation, bias, activation_func_type) diff --git a/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py b/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py index 63323c150752..60bbb4b48bdb 100644 --- a/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py +++ b/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py @@ -4,6 +4,7 @@ # DeepSpeed Team import torch +import torch.nn.functional as F from ..config import DeepSpeedInferenceConfig from .base import BaseOp import deepspeed @@ -14,7 +15,9 @@ class GELUGemmOp(BaseOp): def __init__(self, config: DeepSpeedInferenceConfig): super(GELUGemmOp, self).__init__(config) try: - if self.config.dtype in [torch.float16, torch.int8]: + if self.config.dtype == torch.int8: + self.fused_gemm_gelu = self.inference_module.fused_gemm_gelu_int8 + elif self.config.dtype == torch.float16: if deepspeed.HAS_TRITON and self.config.use_triton and self.config.dtype == torch.float16: from deepspeed.ops.transformer.inference.triton.ops import fused_gemm_gelu as _triton_fused_gemm_gelu self.fused_gemm_gelu = _triton_fused_gemm_gelu # type: ignore @@ -28,7 +31,11 @@ def __init__(self, config: DeepSpeedInferenceConfig): self.fused_gemm_gelu = self.gelu_gemm_fallback def gelu_gemm_fallback(self, input, weight, scale, bias, out, out_scale, dtype, transpose): - raise NotImplementedError + tmp = torch.matmul(input, weight) + tmp = F.gelu(tmp.to(torch.float32) + bias.to(torch.float32), approximate="tanh").to(tmp.dtype) + output = torch.matmul(tmp, out) + + return output def forward(self, input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, weight_out: torch.Tensor): diff --git a/deepspeed/ops/transformer/inference/op_binding/layer_norm.py b/deepspeed/ops/transformer/inference/op_binding/layer_norm.py new file mode 100644 index 000000000000..31219a58ac3c --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/layer_norm.py @@ -0,0 +1,60 @@ +# Copyright (c) Microsoft Corporation. 
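Illustrative usage of the gated-activation fallback added above: the fused ff1 output carries both halves of the gated MLP, so it is split along the channel dimension and only the gate half passes through the activation (GELU here; the shapes are made up):

import torch
import torch.nn.functional as F

def gated_gelu_fallback(activation, bias):
    activations = activation + bias.reshape(1, 1, -1)
    hidden_states, gate = activations.chunk(2, dim=-1)
    return hidden_states * F.gelu(gate.to(torch.float32)).to(activations.dtype)

x = torch.randn(1, 3, 8)        # [batch, seq, 2 * intermediate]
out = gated_gelu_fallback(x, torch.zeros(8))
assert out.shape == (1, 3, 4)   # only half of the channels survive the gating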
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import torch.nn.functional as F +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class LayerNormOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + super(LayerNormOp, self).__init__(config) + try: + if config is None: + self.layer_norm_func = self.inference_module.layer_norm + elif self.config.dtype in [torch.float16, torch.int8]: + self.layer_norm_func = self.inference_module.layer_norm_fp16 + else: + self.layer_norm_func = self.inference_module.layer_norm_fp32 + except AttributeError: + self.layer_norm_func = self.layer_norm_fallback + + @classmethod + def layer_norm_residual(cls, vals, bias, res, gamma, beta, epsilon): + channels = gamma.shape[0] + dtype = gamma.dtype + vals_f = vals.to(torch.float32) + bias_f = bias.to(torch.float32).reshape(1, 1, -1) + res_f = res.to(torch.float32) + gamma_f = gamma.to(torch.float32) + beta_f = beta.to(torch.float32) + return F.layer_norm(vals_f + bias_f + res_f, (channels, ), weight=gamma_f, bias=beta_f, eps=epsilon).to(dtype) + + @classmethod + def layer_norm_residual_store_pre_ln_res(cls, vals, bias, res, gamma, beta, epsilon): + channels = gamma.shape[0] + dtype = gamma.dtype + vals_f = vals.to(torch.float32) + bias_f = bias.to(torch.float32).reshape(1, 1, -1) + res_f = res.to(torch.float32) + gamma_f = gamma.to(torch.float32) + beta_f = beta.to(torch.float32) + res_output = vals_f + bias_f + res_f + norm_output = F.layer_norm(res_output, (channels, ), weight=gamma_f, bias=beta_f, eps=epsilon).to(dtype) + return norm_output, res_output.to(dtype) + + @classmethod + def layer_norm_fallback(cls, vals, gamma, beta, epsilon): + channels = gamma.shape[0] + dtype = gamma.dtype + vals_f = vals.to(torch.float32) + gamma_f = gamma.to(torch.float32) + beta_f = beta.to(torch.float32) + return F.layer_norm(vals_f, (channels, ), weight=gamma_f, bias=beta_f, eps=epsilon).to(dtype) + + def forward(self, vals, gamma, beta, epsilon): + return self.layer_norm_func(vals, gamma, beta, epsilon) diff --git a/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py b/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py index 3064c00d1755..97daf8b74bd8 100644 --- a/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py +++ b/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py @@ -5,12 +5,12 @@ from typing import Optional -import os import torch import torch.nn.functional as F from ..config import DeepSpeedInferenceConfig from .base import BaseOp from deepspeed.utils.types import NormType +from .pre_rms_norm import PreRMSNormOp class MLPGemmOp(BaseOp): @@ -39,23 +39,46 @@ def __init__(self, config: DeepSpeedInferenceConfig): self.mlp_gemm_func = self.mlp_gemm_fallback elif self.config.norm_type == NormType.RMSNorm: self.mlp_gemm_func = self.rms_mlp_gemm_fallback + self.pre_rms_norm = PreRMSNormOp() def mlp_gemm_fallback(self, input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps, pre_layer_norm, mlp_after_attn, interm_scale, out_scale, dtype, mlp_act_func_type, transpose): - if os.environ.get('DS_KI_FALLBACK') == 'True' and mlp_after_attn and not transpose: - residual_add = F.layer_norm(input + residual + input_bias, (input.shape[2], ), gamma, beta, - self.config.epsilon) - tmp = torch.matmul(residual_add, weight_interm) + if mlp_after_attn: + residual_add = F.layer_norm(input + residual + input_bias, (input.shape[2], ), gamma, beta, eps) + tmp = torch.matmul(residual_add, weight_interm.t() if 
transpose else weight_interm)
             tmp = F.gelu(tmp + bias)
-            output = torch.matmul(tmp, weight_out)
-            return (output, residual_add)
+            output = torch.matmul(tmp, weight_out.t() if transpose else weight_out)
+
+            return output, residual_add
         else:
+            # TODO: SW-151870 implement mlp_gemm_fallback
             raise NotImplementedError
 
     def rms_mlp_gemm_fallback(self, input, residual, weight_interm, weight_out, gamma, eps, interm_scale, out_scale,
                               dtype, mlp_act_func_type, transpose):
-        raise NotImplementedError
+        inp_norm, residual = self.pre_rms_norm(input, residual, gamma, eps)
+        tmp = torch.matmul(inp_norm.view([-1, inp_norm.size(2)]), weight_interm.t() if transpose else weight_interm)
+        up_proj, gate_proj = tmp.chunk(2, dim=1)
+
+        from deepspeed.utils.types import ActivationFuncType
+        if mlp_act_func_type == ActivationFuncType.GELU:
+            intermediate = F.gelu(gate_proj)
+        elif mlp_act_func_type == ActivationFuncType.ReLU:
+            intermediate = F.relu(gate_proj)
+        elif mlp_act_func_type == ActivationFuncType.GATED_GELU:
+            intermediate = F.gelu(gate_proj)
+        elif mlp_act_func_type == ActivationFuncType.GATED_SILU:
+            intermediate = F.silu(gate_proj)
+        else:
+            raise NotImplementedError(f"rms_mlp_gemm_fallback not implemented for activation type {mlp_act_func_type}")
+
+        intermediate = intermediate * up_proj
+
+        output = torch.matmul(intermediate, weight_out.t() if transpose else weight_out)
+        output = output.view([input.size(0), input.size(1), -1])
+
+        return [output, residual]
 
     def forward(self,
                 input: torch.Tensor,
diff --git a/deepspeed/ops/transformer/inference/op_binding/moe_res_matmul.py b/deepspeed/ops/transformer/inference/op_binding/moe_res_matmul.py
new file mode 100644
index 000000000000..ef3558c8bc88
--- /dev/null
+++ b/deepspeed/ops/transformer/inference/op_binding/moe_res_matmul.py
@@ -0,0 +1,29 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import torch
+from ..config import DeepSpeedInferenceConfig
+from .base import BaseOp
+
+
+class MoEResMatmulOp(BaseOp):
+
+    def __init__(self, config: DeepSpeedInferenceConfig = None):
+        if config is None:
+            config = DeepSpeedInferenceConfig()
+        super(MoEResMatmulOp, self).__init__(config)
+        try:
+            self.moe_res_matmul_func = self.inference_module.moe_res_matmul
+        except AttributeError:
+            self.moe_res_matmul_func = self.moe_res_matmul_fallback
+
+    @classmethod
+    def moe_res_matmul_fallback(cls, residual, coef, output):
+        coef_t = coef.transpose(1, 2).contiguous()
+        coef1, coef2 = torch.split(coef_t, split_size_or_sections=coef_t.shape[len(coef_t.shape) - 1] // 2, dim=-1)
+        return residual * coef1 + output * coef2
+
+    def forward(self, residual, coef, output):
+        return self.moe_res_matmul_func(residual, coef, output)
diff --git a/deepspeed/ops/transformer/inference/op_binding/pad_transform.py b/deepspeed/ops/transformer/inference/op_binding/pad_transform.py
new file mode 100644
index 000000000000..876fefc3bcfb
--- /dev/null
+++ b/deepspeed/ops/transformer/inference/op_binding/pad_transform.py
@@ -0,0 +1,26 @@
+# Copyright (c) Microsoft Corporation.
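A quick shape check for the moe_res_matmul fallback above; the tensor sizes are invented purely to exercise the transpose/split/broadcast steps and do not reflect the real residual-MoE shapes:

import torch

def moe_res_matmul_fallback(residual, coef, output):
    coef_t = coef.transpose(1, 2).contiguous()
    coef1, coef2 = torch.split(coef_t, coef_t.shape[-1] // 2, dim=-1)
    return residual * coef1 + output * coef2

res = torch.randn(2, 5, 16)
out = torch.randn(2, 5, 16)
coef = torch.randn(2, 32, 5)    # transposes to [2, 5, 32], then splits into two [2, 5, 16] halves
assert moe_res_matmul_fallback(res, coef, out).shape == (2, 5, 16)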
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class PadTransformOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(PadTransformOp, self).__init__(config) + try: + self.pad_transform_func = self.inference_module.pad_transform_fp16 + except AttributeError: + self.pad_transform_func = self.pad_transform_fallback + + @staticmethod + def pad_transform_fallback(query, key, value, heads, do_flash_attn): + raise NotImplementedError("pad_transform fallback is not implemented.") + + def forward(self, query, key, value, heads, do_flash_attn): + return self.pad_transform_func(query, key, value, heads, do_flash_attn) diff --git a/deepspeed/ops/transformer/inference/op_binding/pre_rms_norm.py b/deepspeed/ops/transformer/inference/op_binding/pre_rms_norm.py new file mode 100644 index 000000000000..7969d20f0527 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/pre_rms_norm.py @@ -0,0 +1,31 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp +from .rms_norm import RMSNormOp + + +class PreRMSNormOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(PreRMSNormOp, self).__init__(config) + try: + self.pre_rms_norm_func = self.inference_module.pre_rms_norm + except AttributeError: + self.pre_rms_norm_func = self.pre_rms_norm_fallback + + @staticmethod + def pre_rms_norm_fallback(vals, residual, gamma, epsilon): + residual = vals.to(torch.float32) + residual.to(torch.float32) + vals = residual + + return RMSNormOp.rms_norm_fallback(vals, gamma, epsilon), residual.to(gamma.dtype) + + def forward(self, vals, residual, gamma, epsilon): + return self.pre_rms_norm_func(vals, residual, gamma, epsilon) diff --git a/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py b/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py index 250bf9864e1e..9ff5366fae5d 100644 --- a/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py +++ b/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py @@ -3,11 +3,11 @@ # DeepSpeed Team -import os import torch import torch.nn.functional as F from ..config import DeepSpeedInferenceConfig from .base import BaseOp +from .rms_norm import RMSNormOp import deepspeed from deepspeed.utils.types import NormType @@ -56,19 +56,23 @@ def _triton_autotune(min_seqlen, max_seqlen, hidden_size, dtype=torch.float16): matmul(A, B) Fp16Matmul._update_autotune_table() - def qkv_gemm_fallback(self, input, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose): - if os.environ.get('DS_KI_FALLBACK') == 'True' and not transpose: - inp_norm = F.layer_norm(input, (input.shape[2], ), gamma, beta, eps) - tmp = torch.matmul(inp_norm, weight) - if add_bias: - tmp += bias - output = [tmp, inp_norm] - return output - else: - raise NotImplementedError + @staticmethod + def qkv_gemm_fallback(input, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose): + inp_norm = F.layer_norm(input, (input.shape[2], ), gamma, beta, eps) + tmp = torch.matmul(inp_norm, weight.t() if transpose else weight) + if add_bias: + tmp += bias + output = [tmp, inp_norm] + + return output + + @staticmethod + def rms_qkv_gemm_fallback(input, weight, q_scale, gamma, eps, q_int8, 
transpose): + inp_norm = RMSNormOp.rms_norm_fallback(input, gamma, eps) + tmp = torch.matmul(inp_norm, weight.t() if transpose else weight) + output = [tmp, inp_norm] - def rms_qkv_gemm_fallback(self, input, weight, q_scale, gamma, eps, q_int8, transpose): - raise NotImplementedError + return output def forward(self, input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, gamma: torch.Tensor, beta: torch.Tensor): diff --git a/deepspeed/ops/transformer/inference/op_binding/residual_add.py b/deepspeed/ops/transformer/inference/op_binding/residual_add.py index 6f9b35cbc05d..93b229c5d1ac 100644 --- a/deepspeed/ops/transformer/inference/op_binding/residual_add.py +++ b/deepspeed/ops/transformer/inference/op_binding/residual_add.py @@ -3,9 +3,10 @@ # DeepSpeed Team -import os import torch from typing import Optional + +from .vector_add import VectorAddOp from ..config import DeepSpeedInferenceConfig from .base import BaseOp @@ -22,11 +23,32 @@ def __init__(self, config: DeepSpeedInferenceConfig): else: self.residual_add_func = self.inference_module.residual_add_bias_fp32 except AttributeError: - self.residual_add_func = None - try: - self._vector_add = self.inference_module._vector_add - except AttributeError: - self._vector_add = None + self.residual_add_func = self.residual_add_fallback + self.vector_add = VectorAddOp() + + @staticmethod + def res_add_bias(hidden_state, residual, attn_output, attn_bias, final_bias, add_attn_bias, mp_size): + hidden_state += attn_output + (residual + final_bias) / mp_size + if add_attn_bias: + hidden_state += attn_bias / mp_size + + return hidden_state + + @staticmethod + def residual_add_fallback(hidden_state, residual, attention_output, attention_bias, final_bias, mp_size, + mlp_after_attn, add_bias, pre_layer_norm): + if mlp_after_attn: + if pre_layer_norm: + tmp = (residual.float() + attention_output.float() + attention_bias.float() + + final_bias.float()) / mp_size + hidden_state.float() + else: + tmp = residual.float() + hidden_state.float() + final_bias.float() + else: + tmp = ResidualAddOp.res_add_bias(hidden_state, residual, attention_output, attention_bias, final_bias, + add_bias, mp_size) + residual.copy_(tmp.to(hidden_state.dtype)) + + return residual def forward(self, hidden_state: torch.Tensor, @@ -37,28 +59,15 @@ def forward(self, attention_bias: Optional[torch.Tensor] = None, final_bias: Optional[torch.Tensor] = None): - if self.residual_add_func is not None: - if final_bias is None: - residual = self._vector_add(residual, hidden_state, 1.0 / self.config.mp_size) - else: - if not self.config.pre_layer_norm and residual_add is not None: - # only use residual add if its set and we are not pre layer norm - residual = residual_add - - self.residual_add_func(hidden_state, residual, attention_output, attention_bias, final_bias, - self.config.mp_size, self.config.mlp_after_attn, add_bias, - self.config.pre_layer_norm) + if final_bias is None and attention_bias is None: + residual = self.vector_add(residual + attention_output, hidden_state, 1.0 / self.config.mp_size) else: - # fallback - if os.environ.get('DS_KI_FALLBACK') == 'True' and self.config.mlp_after_attn: - if self.config.pre_layer_norm: - tmp = (residual.float() + attention_output.float() + attention_bias.float() + - final_bias.float()) / self.config.mp_size + hidden_state.float() - else: - tmp = residual.float() + hidden_state.float() + final_bias.float() + if not self.config.pre_layer_norm and residual_add is not None: + # only use residual add if its set and we are not pre layer norm 
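A standalone sketch of what the qkv_gemm fallback above computes: LayerNorm on the input, one GEMM against the fused QKV weight, an optional bias add, with both the projection and the normalized input returned so the caller can reuse the pre-norm activations. Sizes below are illustrative:

import torch
import torch.nn.functional as F

def qkv_gemm_fallback(inp, weight, bias, gamma, beta, eps, add_bias=True):
    inp_norm = F.layer_norm(inp, (inp.shape[2], ), gamma, beta, eps)
    tmp = torch.matmul(inp_norm, weight)
    if add_bias:
        tmp = tmp + bias
    return tmp, inp_norm

x = torch.randn(2, 4, 8)
w = torch.randn(8, 24)          # fused q/k/v projection: hidden -> 3 * hidden
qkv, pre_norm = qkv_gemm_fallback(x, w, torch.zeros(24), torch.ones(8), torch.zeros(8), 1e-5)
assert qkv.shape == (2, 4, 24) and pre_norm.shape == x.shape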
+ residual = residual_add + + self.residual_add_func(hidden_state, residual, attention_output, attention_bias, final_bias, + self.config.mp_size, self.config.mlp_after_attn, add_bias, + self.config.pre_layer_norm) - input_dtype = hidden_state.dtype - residual = tmp.to(input_dtype) - else: - raise NotImplementedError return residual diff --git a/deepspeed/ops/transformer/inference/op_binding/rms_norm.py b/deepspeed/ops/transformer/inference/op_binding/rms_norm.py new file mode 100644 index 000000000000..128883ce5d43 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/rms_norm.py @@ -0,0 +1,33 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class RMSNormOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(RMSNormOp, self).__init__(config) + try: + self.rms_norm_func = self.inference_module.rms_norm + except AttributeError: + self.rms_norm_func = self.rms_norm_fallback + + @staticmethod + def rms_norm_fallback(vals, gamma, epsilon): + variance = vals.to(torch.float32).pow(2).mean(-1, keepdim=True) + vals = vals * torch.rsqrt(variance + epsilon) + + if gamma.dtype in [torch.float16, torch.bfloat16]: + vals = vals.to(gamma.dtype) + + return gamma * vals + + def forward(self, vals, gamma, epsilon): + return self.rms_norm_func(vals, gamma, epsilon) diff --git a/deepspeed/ops/transformer/inference/op_binding/softmax.py b/deepspeed/ops/transformer/inference/op_binding/softmax.py index bc309d94df14..b408883d5cfd 100644 --- a/deepspeed/ops/transformer/inference/op_binding/softmax.py +++ b/deepspeed/ops/transformer/inference/op_binding/softmax.py @@ -3,11 +3,11 @@ # DeepSpeed Team -import os import torch import torch.nn.functional as F from ..config import DeepSpeedInferenceConfig from .base import BaseOp +from deepspeed.ops.transformer.inference.op_binding.workspace import InferenceContext class SoftmaxOp(BaseOp): @@ -25,24 +25,45 @@ def __init__(self, config: DeepSpeedInferenceConfig): except AttributeError: self.softmax_func = self.softmax_fallback - def softmax_fallback(self, attn_scores, attn_mask, alibi, triangular, recompute, local_attention, window_size, - async_op, layer_scale, head_offset, mp_size): - if os.environ.get('DS_KI_FALLBACK') == 'True': - alibi = alibi[head_offset:head_offset + self.num_attention_heads_per_partition] - input_dtype = attn_scores.dtype - if (triangular): - tri = ~torch.tril(torch.ones(attn_scores.size(), device=attn_scores.device)).to(bool) - attn_scores = torch.masked_fill(attn_scores * layer_scale, tri, torch.finfo(input_dtype).min) - if alibi is not None: - attn_scores += alibi - if attn_mask is not None: - # expand atten_mask from two dim into 4 dim, insert two dims in the middle + @staticmethod + def softmax_fallback(attn_scores, attn_mask, alibi, triangular, recompute, local_attention, window_size, async_op, + layer_scale, head_offset, mp_size): + scores_len = len(attn_scores.size()) + heads = 1 + if scores_len > 1: + heads = attn_scores.size()[1] + num_attention_heads_per_partition = heads // mp_size + + if alibi is not None: + if len(alibi.shape) == 1: + alibi = None + else: + alibi = alibi[head_offset:head_offset + num_attention_heads_per_partition] + if attn_mask is not None and len(attn_mask.shape) == 1: + attn_mask = None + input_dtype = attn_scores.dtype + attn_scores *= layer_scale + + if alibi is not 
None: + attn_scores += alibi + if attn_mask is not None: + # expand atten_mask from two dim into 4 dim, insert two dims in the middle + if len(attn_mask.shape) == 2: + # The above if statement was added because the mask was already 4D so this + # expansion should be avoided as it expands to 6D and crashes later (in bloom + # HE KI FB) attn_mask = attn_mask[:, None, None, :] - attn_scores += attn_mask - output = F.softmax(attn_scores, dim=-1, dtype=torch.float32).to(input_dtype) - return output - else: - raise NotImplementedError + attn_scores += attn_mask + if triangular: + if attn_scores.shape[2] == 1: # query using kv cache + token_idx = InferenceContext.Instance().current_tokens() + tri = torch.arange(attn_scores.shape[2], device=attn_scores.device).ge(token_idx) + else: + tri = ~torch.tril(torch.ones(attn_scores.size(), device=attn_scores.device)).to(bool) + attn_scores = torch.masked_fill(attn_scores, tri, float('-inf')) + output = F.softmax(attn_scores, dim=-1, dtype=torch.float32).to(input_dtype) + + return output def forward(self, attn_scores: torch.Tensor, attn_mask: torch.Tensor, alibi: torch.Tensor, triangular: bool, recompute: bool, local_attention: bool, window_size: int, async_op: bool, layer_scale: float, diff --git a/deepspeed/ops/transformer/inference/op_binding/softmax_context.py b/deepspeed/ops/transformer/inference/op_binding/softmax_context.py index 0dc4e08a3633..b9b521e4cd53 100644 --- a/deepspeed/ops/transformer/inference/op_binding/softmax_context.py +++ b/deepspeed/ops/transformer/inference/op_binding/softmax_context.py @@ -7,6 +7,8 @@ from deepspeed import comm as dist from ..config import DeepSpeedInferenceConfig from .base import BaseOp +from .softmax import SoftmaxOp +from deepspeed.ops.transformer.inference.op_binding.workspace import InferenceContext class SoftmaxContextOp(BaseOp): @@ -23,13 +25,109 @@ def __init__(self, config: DeepSpeedInferenceConfig): except AttributeError: self.softmax_context_func = self.softmax_context_fallback + @staticmethod + def transform4d_0213(x, seq_length): + assert x.dim() == 3, F"{x.dim()=} is not supported" + batch_size, num_heads, seq_length_head_dim = x.shape + head_dim = seq_length_head_dim // seq_length + x = x.view(batch_size, num_heads, seq_length, head_dim) + x = x.permute(0, 2, 1, 3) + + return x + + @staticmethod + def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + if n_rep <= 1 or num_key_value_heads == 1: + return hidden_states + + hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) + + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + + @staticmethod + def bias_add_transform_0213(input, bias, num_heads, trans_count, perform_bias=False): + assert trans_count == 1 or trans_count == 3, F"{trans_count=} is not supported" + assert input.dim() == 3, F"{input.dim()=} is not supported" + input_biased = torch.add(input, bias) if perform_bias else input + batch_size, seq_length, value_size = input_biased.shape + hid_dim = value_size // trans_count + head_dim = hid_dim // num_heads + + if trans_count == 1: + query_layer = input.view(batch_size, seq_length, num_heads, head_dim) + query_layer = query_layer.permute(0, 2, 1, 3) + key_layer = torch.zeros_like(query_layer) + value_layer = torch.zeros_like(query_layer) + return query_layer, key_layer, value_layer + + qkv_layers = input.view(batch_size, seq_length, 3, num_heads, head_dim) + query_layer, 
key_layer, value_layer = qkv_layers[..., 0, :, :], qkv_layers[..., 1, :, :], qkv_layers[..., + 2, :, :] + query_layer = query_layer.transpose(1, 2) + key_layer = key_layer.transpose(1, 2) + value_layer = value_layer.transpose(1, 2) + + return query_layer, key_layer, value_layer + def softmax_context_fallback(self, query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two, heads, num_kv, norm_factor, triangular_masking, local_attention, window_size, no_masking, - layer_id, num_layers, alibi, rope_theta): - raise NotImplementedError + layer_id, num_layers, alibi, rope_theta, is_prompt, token_idx, position_ids): + bat_0213_query, bat_0213_key, bat_0213_value = self.bias_add_transform_0213( + query_key_value, None, heads, 3, False) + + if rotary_dim > 0 and rotate_half: + from transformers.models.llama.modeling_llama import apply_rotary_pos_emb + + rotary = InferenceContext.Instance().get_rotary(rotary_dim, rope_theta, bat_0213_value.device) + cos, sin = rotary(bat_0213_value, InferenceContext.Instance().get_max_tokens_num()) + # TODO: SW-170999 Optimize RoPE implementation. + bat_0213_query, bat_0213_key = apply_rotary_pos_emb(bat_0213_query, bat_0213_key, cos, sin, position_ids) + + bat_0213_key, bat_0213_value = InferenceContext.Instance().update_cache(layer_id, token_idx, is_prompt, + bat_0213_key, bat_0213_value) + + bat_0213_key = self.repeat_kv(bat_0213_key, num_kv) + bat_0213_value = self.repeat_kv(bat_0213_value, num_kv) + + bsz = query_key_value.shape[0] + head_dim = query_key_value.shape[2] // (heads * 3) + + bmm_output = torch.bmm(bat_0213_query.reshape(bsz * heads, bat_0213_query.shape[2], head_dim), + bat_0213_key.reshape(bsz * heads, bat_0213_key.shape[2], head_dim).transpose(1, 2)) + + layer_scale = 1.0 + if alibi is not None and len(alibi.shape) > 1: + layer_scale = max(1, layer_id).to(float) + + alpha = norm_factor * norm_factor / layer_scale + bmm_output *= alpha + bmm_output_reshape = bmm_output.reshape(bsz, heads, bmm_output.shape[1], bmm_output.shape[2]) + + recompute = is_prompt + if attn_mask is not None and len(attn_mask.shape) > 1 and attn_mask.shape[-1] < bmm_output_reshape.shape[3]: + attn_mask = torch.nn.functional.pad(attn_mask, (0, bmm_output_reshape.shape[3] - attn_mask.shape[-1]), + value=torch.finfo(attn_mask.dtype).min) + softmax_output = SoftmaxOp.softmax_fallback(bmm_output_reshape, attn_mask, alibi, triangular_masking, + recompute, local_attention, window_size, None, layer_scale, 0, 1) + + output = torch.bmm(softmax_output.reshape(bsz * heads, softmax_output.shape[2], softmax_output.shape[3]), + bat_0213_value.reshape(bsz * heads, bat_0213_value.shape[2], head_dim)) + + output = output.reshape(bsz, heads, output.shape[1], head_dim) + output = output.reshape(bsz, heads, output.shape[2] * head_dim) + input_seq_len = query_key_value.shape[1] + t4d_0123_output = self.transform4d_0213(output, input_seq_len) + t4d_0123_output = t4d_0123_output.reshape(bsz, t4d_0123_output.shape[1], heads * head_dim) + + if layer_id == num_layers - 1: + InferenceContext.Instance().advance_tokens() + + return t4d_0123_output, bat_0213_key, bat_0213_value def forward(self, query_key_value: torch.Tensor, attn_mask: torch.Tensor, heads: int, num_kv: int, - norm_factor: float, no_masking: bool, layer_id: int, num_layers: int, alibi: torch.Tensor): + norm_factor: float, no_masking: bool, layer_id: int, num_layers: int, alibi: torch.Tensor, + is_prompt: bool, token_idx: torch.Tensor, position_ids: torch.Tensor): if alibi is not None: batch_heads = query_key_value.shape[0] * 
heads @@ -42,6 +140,6 @@ def forward(self, query_key_value: torch.Tensor, attn_mask: torch.Tensor, heads: self.config.rotate_every_two, heads, num_kv, norm_factor, self.config.triangular_masking, self.config.local_attention, self.config.window_size, no_masking, layer_id, num_layers, alibi, - self.config.rope_theta) + self.config.rope_theta, is_prompt, token_idx, position_ids) return output diff --git a/deepspeed/ops/transformer/inference/op_binding/vector_add.py b/deepspeed/ops/transformer/inference/op_binding/vector_add.py new file mode 100644 index 000000000000..015340a1084b --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/vector_add.py @@ -0,0 +1,28 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class VectorAddOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(VectorAddOp, self).__init__(config) + try: + self.vector_add_func = self.inference_module._vector_add + except AttributeError: + self.vector_add_func = self.vector_add_fallback + + @classmethod + def vector_add_fallback(cls, a, b, gamma): + """Based on csrc/transformer/inference/csrc/pt_binding.cpp code of _vector_add""" + dtype = a.dtype + return (gamma * a.float() + b.float()).to(dtype) + + def forward(self, a, b, gamma): + return self.vector_add_func(a, b, gamma) diff --git a/deepspeed/ops/transformer/inference/op_binding/vector_matmul.py b/deepspeed/ops/transformer/inference/op_binding/vector_matmul.py index 011be859634d..cabab8d8c4ab 100644 --- a/deepspeed/ops/transformer/inference/op_binding/vector_matmul.py +++ b/deepspeed/ops/transformer/inference/op_binding/vector_matmul.py @@ -3,7 +3,6 @@ # DeepSpeed Team -import os import torch from ..config import DeepSpeedInferenceConfig from .base import BaseOp @@ -25,7 +24,7 @@ def __init__(self, config: DeepSpeedInferenceConfig): else: self.vector_matmul_func = self.inference_module.vector_matmul_fp16 elif self.config.dtype == torch.int8: - self.vector_matmul_func = self.inference_module.vector_matmul_fp16 + self.vector_matmul_func = self.inference_module.vector_matmul_int8 elif self.config.dtype == torch.bfloat16: self.vector_matmul_func = self.inference_module.vector_matmul_bf16 else: @@ -34,10 +33,7 @@ def __init__(self, config: DeepSpeedInferenceConfig): self.vector_matmul_func = self.vector_matmul_fallback def vector_matmul_fallback(self, input, weight, async_op, q_scale, q_int8, transpose): - if os.environ.get('DS_KI_FALLBACK') == 'True' and not transpose: - return torch.matmul(input, weight) - else: - raise NotImplementedError + return torch.matmul(input, weight.t() if transpose else weight) def forward(self, input: torch.Tensor, weight: torch.Tensor, async_op: bool = False): q_scale = weight.scale if hasattr(weight, 'scale') else torch.empty(1) diff --git a/deepspeed/ops/transformer/inference/op_binding/workspace.py b/deepspeed/ops/transformer/inference/op_binding/workspace.py new file mode 100644 index 000000000000..b06b1ca0bd32 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/workspace.py @@ -0,0 +1,204 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + +minus_inf = -10000.0 +key_idx = 0 +value_idx = 1 + + +class InferenceContext: + + __instance = None + + def __init__(self): + self.kv_cache = None + self.kv_cache_elem_dtype = None + self.num_tokens = 1 + self.kv_cache_num_layers = None + self.kv_cache_size = None + self.max_out_tokens = None + self.rotary = None + self.allocate_called = False + self.static_shapes = True + + @classmethod + def Instance(cls): + if InferenceContext.__instance is None: + InferenceContext.__instance = InferenceContext() + return InferenceContext.__instance + + def gen_workspace(self, num_layers, num_heads, batch_size, prompt_len, hidden_dim, mp_size, external_cache, + elem_dtype, rank, max_out_tokens, min_out_tokens): + self.allocate_called = True + self.kv_cache = None + if not external_cache: + self.kv_cache_num_layers = num_layers + self.max_out_tokens = max_out_tokens + head_size = hidden_dim // num_heads + self.kv_cache_size = torch.Size([batch_size, (num_heads // mp_size), max_out_tokens, head_size]) + self.kv_cache_elem_dtype = elem_dtype + self.num_tokens = 0 + self.static_shapes = True + return True + + def retake_workspace(self): + return True + + def _retake_workspace(self): + assert self.allocate_called, "retake workspace called before allocate workspace" + + import deepspeed.accelerator as accelerator + if self.kv_cache is None: + self.kv_cache = [] + for layer in range(self.kv_cache_num_layers): + self.kv_cache.append((torch.zeros(self.kv_cache_size, + dtype=self.kv_cache_elem_dtype, + device=accelerator.get_accelerator().device_name()), + torch.zeros(self.kv_cache_size, + dtype=self.kv_cache_elem_dtype, + device=accelerator.get_accelerator().device_name()))) + + return True + + def update_cache(self, layer_id, token_idx, is_prompt, bat_0213_key, bat_0213_value): + has_workspace = self._retake_workspace() + assert has_workspace, "Could not allocate workspace" + + # Update current token + if is_prompt: + self.static_shapes = True + if token_idx is None: + self.static_shapes = False + InferenceContext.Instance().reset_tokens(bat_0213_key.shape[2]) + else: + InferenceContext.Instance().reset_tokens(token_idx) + + if token_idx is None: + token_idx = InferenceContext.Instance().current_tokens() + + bsz = bat_0213_key.shape[0] + + # Update cache content + if is_prompt: + cache_max_seq = self.kv_cache_size[2] + cache_max_head_dim = self.kv_cache_size[3] + seq = bat_0213_key.shape[2] + + mask = torch.arange(cache_max_seq, device=bat_0213_key.device) + mask = mask.ge(token_idx) + mask = mask.unsqueeze(-1) + mask = mask.expand([cache_max_seq, cache_max_head_dim]) + + self.kv_cache[layer_id][key_idx][:bsz, :, :seq, :].copy_(bat_0213_key) + self.kv_cache[layer_id][key_idx][:bsz, :].masked_fill_(mask, 0) + self.kv_cache[layer_id][value_idx][:bsz, :, :seq, :].copy_(bat_0213_value) + self.kv_cache[layer_id][value_idx][:bsz, :].masked_fill_(mask, 0) + else: + if self.static_shapes: + assert type(token_idx) == torch.Tensor, "token_idx is expected to be torch.Tensor" + self.kv_cache[layer_id][key_idx][:bsz].index_copy_(2, token_idx - 1, bat_0213_key) + self.kv_cache[layer_id][value_idx][:bsz].index_copy_(2, token_idx - 1, bat_0213_value) + else: + assert type(token_idx) == int, "token_idx is expected to be int" + self.kv_cache[layer_id][key_idx][:bsz, :, token_idx - 1:token_idx, :] = bat_0213_key + self.kv_cache[layer_id][value_idx][:bsz, :, token_idx - 1:token_idx, 
:] = bat_0213_value + + bat_0213_key = self.kv_cache[layer_id][key_idx][:bsz] + bat_0213_value = self.kv_cache[layer_id][value_idx][:bsz] + + if not self.static_shapes: + bat_0213_key = bat_0213_key[:, :, :token_idx, :] + bat_0213_value = bat_0213_value[:, :, :token_idx, :] + + return bat_0213_key, bat_0213_value + + def release_workspace(self): + self.kv_cache = None + self.rotary = None + + def reset_tokens(self, initial_tokens=1): + self.num_tokens = initial_tokens + + def current_tokens(self): + return self.num_tokens + + def advance_tokens(self): + self.num_tokens = self.num_tokens + 1 + + def get_kv_cache(self): + return self.kv_cache + + def get_rotary(self, rotary_dim, rope_theta, device=None): + if self.rotary is None: + from transformers.models.llama.modeling_llama import LlamaRotaryEmbedding + + self.rotary = LlamaRotaryEmbedding(rotary_dim, base=rope_theta, device=device) + + return self.rotary + + def get_max_tokens_num(self): + return self.max_out_tokens + + +class WorkspaceOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(WorkspaceOp, self).__init__(config) + + self.inference_context = InferenceContext.Instance() + try: + if config.dtype == torch.float32: + self.allocate_workspace = self.inference_module.allocate_workspace_fp32 + elif config.dtype == torch.bfloat16: + self.allocate_workspace = self.inference_module.allocate_workspace_bf16 + else: + self.allocate_workspace = self.inference_module.allocate_workspace_fp16 + self.release_workspace = self.inference_module.release_workspace + self.retake_workspace = self.inference_module.retake_workspace + self.reset_cache = self.inference_module.reset_cache + except AttributeError: + if config.dtype == torch.float32: + self.allocate_workspace = self.allocate_workspace_fp32_fallback + elif config.dtype == torch.bfloat16: + self.allocate_workspace = self.allocate_workspace_bf16_fallback + else: + self.allocate_workspace = self.allocate_workspace_fp16_fallback + self.release_workspace = self.release_workspace_fallback + self.retake_workspace = self.retake_workspace_fallback + self.reset_cache = self.reset_cache_fallback + + def allocate_workspace_fp32_fallback(self, hidden_dim, num_heads, prompt_length, batch_size, num_layers, mp_size, + external_cache, rank, max_out_tokens, min_out_tokens): + return self.inference_context.gen_workspace(num_layers, num_heads, batch_size, prompt_length, hidden_dim, + mp_size, external_cache, torch.float, rank, max_out_tokens, + min_out_tokens) + + def allocate_workspace_bf16_fallback(self, hidden_dim, num_heads, prompt_length, batch_size, num_layers, mp_size, + external_cache, rank, max_out_tokens, min_out_tokens): + return self.inference_context.gen_workspace(num_layers, num_heads, batch_size, prompt_length, hidden_dim, + mp_size, external_cache, torch.bfloat16, rank, max_out_tokens, + min_out_tokens) + + def allocate_workspace_fp16_fallback(self, hidden_dim, num_heads, prompt_length, batch_size, num_layers, mp_size, + external_cache, rank, max_out_tokens, min_out_tokens): + return self.inference_context.gen_workspace(num_layers, num_heads, batch_size, prompt_length, hidden_dim, + mp_size, external_cache, torch.half, rank, max_out_tokens, + min_out_tokens) + + def reset_cache_fallback(self): + return self.inference_context.reset_tokens() + + def release_workspace_fallback(self): + return self.inference_context.release_workspace() + + def retake_workspace_fallback(self): + return 
self.inference_context.retake_workspace()
diff --git a/deepspeed/ops/transformer/inference/triton/attention.py b/deepspeed/ops/transformer/inference/triton/attention.py
index c05370ec74e5..6845d91b06be 100644
--- a/deepspeed/ops/transformer/inference/triton/attention.py
+++ b/deepspeed/ops/transformer/inference/triton/attention.py
@@ -125,7 +125,7 @@ def _triton_autotune(min_seqlen,
            context_4d_matmul(output, qkv, head_size)
        Fp16Matmul._update_autotune_table()

-    def ds_compute_attention(self, qkv_out, input_mask, layer_past, alibi):
+    def ds_compute_attention(self, qkv_out, input_mask, layer_past, alibi, is_prompt, token_idx, position_ids):
        if isinstance(qkv_out, list):
            qkv_out = qkv_out[0]

@@ -143,7 +143,10 @@ def ds_compute_attention(self, qkv_out, input_mask, layer_past, alibi):
                                                      no_masking=no_masking,
                                                      layer_id=self.config.layer_id,
                                                      num_layers=TritonSelfAttention.num_layers,
-                                                      alibi=alibi)
+                                                      alibi=alibi,
+                                                      is_prompt=is_prompt,
+                                                      token_idx=token_idx,
+                                                      position_ids=position_ids)
        context_layer, key_layer, value_layer = attn_key_value
        return context_layer, key_layer, value_layer

@@ -161,7 +164,8 @@ def forward(
            norm_w=None,
            norm_b=None,
            alibi=None,
-            use_triton_attention=True):
+            use_triton_attention=True,
+            **kwargs):

        if not self.config.pre_layer_norm:
            qkv_out = self.linear_func(input=input,
@@ -192,10 +196,16 @@ def forward(
                                              triangular=self.triangular_masking)
            key_layer, value_layer = qkv[:, :, self.hidden_size:2 * self.hidden_size], qkv[:, :, 2 * self.hidden_size:]
        else:
+            is_prompt = kwargs.get("first_token", qkv_out[0].shape[1] > 1)
+            token_idx = kwargs.get("token_idx", None)
+            position_ids = kwargs.get("position_ids", None)
            context_layer, key_layer, value_layer = self.ds_compute_attention(qkv_out=qkv_out,
                                                                              input_mask=input_mask,
                                                                              layer_past=layer_past,
-                                                                              alibi=alibi)
+                                                                              alibi=alibi,
+                                                                              is_prompt=is_prompt,
+                                                                              token_idx=token_idx,
+                                                                              position_ids=position_ids)
            output = self.vector_matmul_func(input=context_layer, weight=self.attn_ow)

        inp_norm = qkv_out[-1]
diff --git a/deepspeed/ops/transformer/inference/triton/ops.py b/deepspeed/ops/transformer/inference/triton/ops.py
index dd87d08d4d2c..dbed45313780 100644
--- a/deepspeed/ops/transformer/inference/triton/ops.py
+++ b/deepspeed/ops/transformer/inference/triton/ops.py
@@ -3,12 +3,10 @@

 # DeepSpeed Team

-import deepspeed
-from deepspeed.ops.op_builder import InferenceBuilder
 import deepspeed.ops.transformer.inference.triton.matmul_ext as matmul_ext
+from deepspeed.ops.transformer.inference.op_binding.layer_norm import LayerNormOp
 from deepspeed.ops.transformer.inference.triton.layer_norm import layer_norm, layer_norm_residual
-
-inference_module = None
+from deepspeed.utils.types import ActivationFuncType


 def vector_matmul_func(input, weight, async_op, q_scale, q_int8, transposed_mode):
@@ -76,15 +74,12 @@ def mlp_gemm_func(input,
    if use_triton_ln:
        mlp_input = layer_norm_residual(input, input_bias, residual, gamma, beta, epsilon)
    else:
-        global inference_module
-        if inference_module is None:
-            inference_module = InferenceBuilder().load()
-        mlp_input = inference_module._layer_norm_residual(input, input_bias, residual, gamma, beta, epsilon)
+        mlp_input = LayerNormOp.layer_norm_residual(input, input_bias, residual, gamma, beta, epsilon)

    # activation
-    if deepspeed.utils.types.ActivationFuncType(mlp_act_func_type) == deepspeed.utils.types.ActivationFuncType.GELU:
+    if ActivationFuncType(mlp_act_func_type) == ActivationFuncType.GELU:
        activation = "gelu"
-    elif deepspeed.utils.types.ActivationFuncType(mlp_act_func_type) ==
deepspeed.utils.types.ActivationFuncType.ReLU: + elif ActivationFuncType(mlp_act_func_type) == ActivationFuncType.ReLU: activation = "relu" else: activation = "" @@ -121,10 +116,7 @@ def qkv_gemm_func( if use_triton_ln: qkv_input = layer_norm(input, gamma, beta, epsilon) else: - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - qkv_input = inference_module.layer_norm(input, gamma, beta, epsilon) + qkv_input = LayerNormOp()(input, gamma, beta, epsilon) qkv_out = matmul_ext.matmul(qkv_input, weight, bias=(bias if add_bias else None), activation="", use_triton=True) diff --git a/deepspeed/runtime/__init__.py b/deepspeed/runtime/__init__.py index 347ff7993d82..208299fb8c50 100644 --- a/deepspeed/runtime/__init__.py +++ b/deepspeed/runtime/__init__.py @@ -2,11 +2,3 @@ # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team - - -class DeepSpeedOptimizer(object): - pass - - -class ZeROOptimizer(DeepSpeedOptimizer): - pass diff --git a/deepspeed/runtime/activation_checkpointing/checkpointing.py b/deepspeed/runtime/activation_checkpointing/checkpointing.py index 02e0b197e927..0a0850d62ccc 100644 --- a/deepspeed/runtime/activation_checkpointing/checkpointing.py +++ b/deepspeed/runtime/activation_checkpointing/checkpointing.py @@ -25,8 +25,9 @@ from deepspeed.runtime.config import DeepSpeedConfig from deepspeed.utils import logger -from deepspeed.runtime.utils import copy_to_device, move_to_device, see_memory_usage, bwc_tensor_model_parallel_rank +from deepspeed.runtime.utils import copy_to_device, move_to_device, see_memory_usage from deepspeed.utils.timer import SynchronizedWallClockTimer as Timers, FORWARD_GLOBAL_TIMER +from deepspeed.utils.bwc import bwc_tensor_model_parallel_rank from deepspeed.accelerator import get_accelerator # DeepSpeed Checkpointing Enabled or Disabled @@ -605,6 +606,9 @@ def backward(ctx, *grads): # removing pointers to the contiguous buffer memory # so that they can be garbage collected once the checkpoints # have been used + if grads[0].device.type == 'hpu': + import habana_frameworks.torch as htorch + htorch.core.mark_step() if SYNCHRONIZE: get_accelerator().synchronize() if PROFILE_TIME: diff --git a/deepspeed/runtime/base_optimizer.py b/deepspeed/runtime/base_optimizer.py new file mode 100644 index 000000000000..6cfd66f1cc38 --- /dev/null +++ b/deepspeed/runtime/base_optimizer.py @@ -0,0 +1,63 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import torch + +from deepspeed.utils import logger +from deepspeed.utils.tensor_fragment import map_to_flat_opt_states +from deepspeed.runtime.utils import bwc_tensor_model_parallel_rank + + +class DeepSpeedOptimizer(object): + pass + + +class ZeROOptimizer(DeepSpeedOptimizer): + + def load_hp_checkpoint_state_from_checkpoint_dir(self, lp_groups_name: str, checkpoint_dir: str) -> None: + checkpoint_dir = os.path.join(checkpoint_dir, "zero") + optim_state_path = os.path.join(checkpoint_dir, "optimizer_state.pt") + assert os.path.isfile( + optim_state_path), f'{optim_state_path} containing optimizer global state is missing! Cannot proceed.' 
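+        # The "zero" sub-directory is expected to hold a single optimizer_state.pt with the
+        # global (non per-parameter) optimizer state; the per-parameter hp slices are loaded
+        # further below from per-tensor folders named after self.param_names.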
+ optim_sd = torch.load(optim_state_path) + + self._load_global_state(optim_sd) + + tp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu) + if self.mpu is None: + logger.warn("MPU is not provided, setting tp size to 1 in checkpoint loading.") + tp_world_size = 1 + else: + tp_world_size = self.mpu.get_slice_parallel_world_size() if hasattr(self.mpu, "get_slice_parallel_world_size") \ + else self.mpu.get_tensor_model_parallel_world_size() + + for i, (param_group, + loaded_param_group) in enumerate(zip(self.optimizer.param_groups, optim_sd['param_groups'])): + # We have an assumption that all params in the same param_group have the same keys + opt_keys = set() + steps = [] + + lp_groups = getattr(self, lp_groups_name) + for lp in lp_groups[i]: + if lp._hp_mapping is not None: + #print(f"Loading {self.param_names[lp]} {tp_rank=} {tp_world_size=}") + step = lp.load_hp_checkpoint_state(os.path.join(checkpoint_dir, self.param_names[lp]), tp_rank, + tp_world_size) + for key in lp._hp_mapping.get_optim_state_keys(): + opt_keys.add(key) + steps.append(step) + + hp_param = param_group['params'][0] + assert all(step == steps[0] for step in steps), f"Steps {steps} are not equal" + if steps[0] is not None: + self.optimizer.state[hp_param]['step'] = steps[0] + + map_to_flat_opt_states(hp_param, lp_groups[i], self.optimizer.state, opt_keys) + + for key, value in loaded_param_group.items(): + if key == 'params': + continue + param_group[key] = value diff --git a/deepspeed/runtime/bf16_optimizer.py b/deepspeed/runtime/bf16_optimizer.py index aaa836bf1c31..d4b42726fced 100644 --- a/deepspeed/runtime/bf16_optimizer.py +++ b/deepspeed/runtime/bf16_optimizer.py @@ -6,19 +6,19 @@ from collections import OrderedDict import torch import sys -import os from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors from deepspeed import comm as dist from deepspeed.runtime.constants import PIPE_REPLICATED -from deepspeed.runtime import ZeROOptimizer +from deepspeed.runtime.base_optimizer import ZeROOptimizer from packaging import version as pkg_version - from deepspeed.git_version_info import version from deepspeed.runtime.utils import (get_global_norm_of_tensors, clip_tensors_by_global_norm, DummyOptim, - align_dense_tensors, all_gather_dp_groups, bwc_tensor_model_parallel_rank, - is_model_parallel_parameter, see_memory_usage, graph_process) + align_dense_tensors, all_gather_dp_groups, is_model_parallel_parameter, + see_memory_usage, graph_process, get_norm_with_moe_layers) +from deepspeed.utils import link_hp_params, lazy_init_hp_params_optimizer_state, fragment_address, groups +from deepspeed.moe.utils import is_moe_param, is_moe_param_group +from deepspeed.utils.bwc import bwc_tensor_model_parallel_rank -from deepspeed.utils import link_hp_params, lazy_init_hp_params_optimizer_state, fragment_address from deepspeed.checkpoint import enable_universal_checkpoint from deepspeed.checkpoint.constants import (DS_VERSION, PARTITION_COUNT, BASE_OPTIMIZER_STATE, SINGLE_PARTITION_OF_FP32_GROUPS, CLIP_GRAD, GROUP_PADDINGS, @@ -40,7 +40,8 @@ def __init__(self, timers=None, grad_acc_dtype=None, graph_harvesting=False, - immediate_grad_update=False): + immediate_grad_update=False, + has_moe_layers=False): super().__init__() see_memory_usage('begin bf16_optimizer', force=True) self.timers = timers @@ -59,7 +60,11 @@ def __init__(self, self.allgather_bucket_size = int(allgather_bucket_size) self.dp_process_group = dp_process_group self.dp_rank = dist.get_rank(group=self.dp_process_group) + self.has_moe_layers = 
has_moe_layers + self.non_expert_gradients = [] self.real_dp_process_group = [dp_process_group for i in range(len(self.optimizer.param_groups))] + if self.has_moe_layers: + self._configure_moe_settings() # Use torch (un)flatten ops self.flatten = _flatten_dense_tensors @@ -90,11 +95,26 @@ def __init__(self, see_memory_usage('end bf16_optimizer', force=True) + def _configure_moe_settings(self): + assert any( + [is_moe_param_group(group) for group in self.optimizer.param_groups] + ), "The model has moe layers, but None of the param groups are marked as MoE. Create a param group with 'moe' key set to True before creating optimizer" + + for i, group in enumerate(self.optimizer.param_groups): + if is_moe_param_group(group): + assert all([is_moe_param(param) + for param in group['params']]), "All params in MoE group must be MoE params" + self.real_dp_process_group[i] = groups._get_expert_data_parallel_group(group['name']) + self.expert_gradients = {} + if self.has_moe_layers: + for key in groups._get_expert_data_parallel_group_dict().keys(): + self.expert_gradients[key] = [] + def _setup_for_real_optimizer(self): - dp_world_size = dist.get_world_size(group=self.dp_process_group) - self.partition_count = [dp_world_size for i in range(len(self.optimizer.param_groups))] + self.partition_count = [dist.get_world_size(group=pg) for pg in self.real_dp_process_group] for i, param_group in enumerate(self.optimizer.param_groups): + real_dp_world_size = dist.get_world_size(group=self.real_dp_process_group[i]) see_memory_usage(f'before initializing group {i}', force=True) partition_id = dist.get_rank(group=self.real_dp_process_group[i]) @@ -106,17 +126,16 @@ def _setup_for_real_optimizer(self): # create flat bf16 params self.bf16_groups_flat.append( self._flatten_dense_tensors_aligned(self.bf16_groups[i], - self.nccl_start_alignment_factor * dp_world_size)) - + self.nccl_start_alignment_factor * real_dp_world_size)) # Make bf16 params point to flat tensor storage self._update_storage_to_flattened_tensor(tensor_list=self.bf16_groups[i], flat_tensor=self.bf16_groups_flat[i]) # divide flat weights into equal sized partitions - partition_size = self.bf16_groups_flat[i].numel() // dp_world_size + partition_size = self.bf16_groups_flat[i].numel() // real_dp_world_size bf16_dp_partitions = [ self.bf16_groups_flat[i].narrow(0, dp_index * partition_size, partition_size) - for dp_index in range(dp_world_size) + for dp_index in range(real_dp_world_size) ] self.bf16_partitioned_groups.append(bf16_dp_partitions) @@ -127,8 +146,12 @@ def _setup_for_real_optimizer(self): num_elem_list = [t.numel() for t in self.bf16_groups[i]] # create fp32 gradients - self.fp32_groups_gradients_flat.append( - torch.zeros_like(self.bf16_groups_flat[i], dtype=self.grad_acc_dtype)) + fp32_flat_buffer = torch.zeros_like(self.bf16_groups_flat[i], dtype=self.grad_acc_dtype) + self.fp32_groups_gradients_flat.append(fp32_flat_buffer) + if self.has_moe_layers and is_moe_param_group(param_group): + self.expert_gradients[param_group['name']].append(fp32_flat_buffer) + else: + self.non_expert_gradients.append(fp32_flat_buffer) # track individual fp32 gradients for entire model fp32_gradients = self._split_flat_tensor(flat_tensor=self.fp32_groups_gradients_flat[i], @@ -191,11 +214,12 @@ def _create_param_mapping(self): return param_mapping def _link_all_hp_params(self): - dp_world_size = dist.get_world_size(group=self.dp_process_group) for i, _ in enumerate(self.optimizer.param_groups): + real_dp_world_size = 
dist.get_world_size(group=self.real_dp_process_group[i]) + # Link bf16 and fp32 params in partition partition_id = dist.get_rank(group=self.real_dp_process_group[i]) - partition_size = self.bf16_groups_flat[i].numel() // dp_world_size + partition_size = self.bf16_groups_flat[i].numel() // real_dp_world_size flat_hp_partition = self.fp32_groups_flat_partition[i] link_hp_params(lp_param_list=self.bf16_groups[i], flat_hp_partition=flat_hp_partition, @@ -257,10 +281,18 @@ def step(self, closure=None): if closure is not None: raise NotImplementedError(f'{self.__class__} does not support closure.') - all_groups_norm = get_global_norm_of_tensors(input_tensors=self.get_grads_for_norm(), - mpu=self.mpu, - norm_type=self.norm_type, - use_graph=self.graph_harvesting) + non_expert_grads_for_norm, expert_grads_for_norm = self.get_grads_for_norm() + non_expert_groups_norm = get_global_norm_of_tensors(input_tensors=non_expert_grads_for_norm, + mpu=self.mpu, + norm_type=self.norm_type, + use_graph=self.graph_harvesting) + all_groups_norm = non_expert_groups_norm + if self.has_moe_layers: + all_groups_norm = get_norm_with_moe_layers(non_expert_groups_norm, + mpu=self.mpu, + expert_tensors=expert_grads_for_norm, + norm_type=self.norm_type) + self._global_grad_norm = all_groups_norm assert all_groups_norm > 0. @@ -271,8 +303,18 @@ def step(self, closure=None): mpu=self.mpu, use_graph=self.graph_harvesting) + for param_partition, grad_partition in zip(self.fp32_groups_flat_partition, + self.fp32_groups_gradient_flat_partition): + # In case of grad acc dtype different than FP32, need to cast to high precision. + param_partition.grad = grad_partition.to( + param_partition.dtype) if grad_partition.dtype != param_partition.dtype else grad_partition + self.optimizer.step() + if self.grad_acc_dtype is not torch.float32: + for param_partition in self.fp32_groups_flat_partition: + param_partition.grad = None + # We need to link optimizer state after the first step() call self._lazy_init_hp_params_optimizer_state() @@ -292,7 +334,7 @@ def backward(self, loss, update_hp_grads=True, clear_lp_grads=False, **bwd_kwarg self.clear_lp_grads() loss.backward(**bwd_kwargs) - if update_hp_grads: + if not self.immediate_grad_update and update_hp_grads: self.update_hp_grads(clear_lp_grads=clear_lp_grads) @torch.no_grad() @@ -310,7 +352,7 @@ def _update_hp_grad(self, lp, group_idx, param_idx, clear_lp_grads): # clear gradients if clear_lp_grads: - lp.grad._zero() + lp.grad.zero_() @torch.no_grad() def _update_hp_grads_func(self, clear_lp_grads=False): @@ -336,27 +378,55 @@ def update_hp_grads(self, clear_lp_grads=False): @torch.no_grad() def get_grads_for_reduction(self): - return self.fp32_groups_gradients_flat + if self.has_moe_layers: + return self.non_expert_gradients, self.expert_gradients + return self.non_expert_gradients, {} @torch.no_grad() def get_grads_for_norm(self, for_clipping=False): - grads = [] + """ + Returns: + tuple[list[Tensor], dict[ep_name, List[Tensor]] | list: + If for_clipping, return all gradients. 
+ Otherwise, separate and return dict of expert_grad and list of non_expert_grad + """ + # (grads, expert_group_name) + expert_grads_for_norm = {} + + # grads + non_expert_grads_for_norm = [] + all_grads_for_clip = [] + tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu) + assert len(self.bf16_groups) == len(self.optimizer.param_groups) for i, group in enumerate(self.bf16_groups): for j, lp in enumerate(group): if not for_clipping: if hasattr(lp, PIPE_REPLICATED) and lp.ds_pipe_replicated: continue - if not (tensor_mp_rank == 0 or is_model_parallel_parameter(lp)): + # skip duplicated parameters. perform norm only on cards with tp_rank=0. + # non-duplicated parameters include: + # - Parameters with tp: Use allreducesum of mp_group. + # - Moe Parameters with ep: Use allreducesum of ep_group. + if not (tensor_mp_rank == 0 or is_model_parallel_parameter(lp) or is_moe_param(lp)): continue if not self.fp32_groups_has_gradients[i][j]: continue - - grads.append(self.fp32_groups_gradients[i][j]) - - return grads + if not for_clipping: + param_group = self.optimizer.param_groups[i] + if self.has_moe_layers and is_moe_param_group(param_group): + if param_group['name'] not in expert_grads_for_norm: + expert_grads_for_norm[param_group['name']] = [] + expert_grads_for_norm[param_group['name']].append(self.fp32_groups_gradients[i][j]) + else: + non_expert_grads_for_norm.append(self.fp32_groups_gradients[i][j]) + else: + all_grads_for_clip.append(self.fp32_groups_gradients[i][j]) + if not for_clipping: + return non_expert_grads_for_norm, expert_grads_for_norm + return all_grads_for_clip @torch.no_grad() def update_lp_params(self): @@ -368,6 +438,7 @@ def update_lp_params(self): # if i == 0: # print_rank_0(f'{fp32_partition[:10]=}', force=True) + #TODO: SW-90304 call all_gather_dp_groups with async_op=true if zero optimizer hpu_use_async_collectives is enabled all_gather_dp_groups(groups_flat=self.bf16_groups_flat, partitioned_param_groups=self.bf16_partitioned_groups, dp_process_group=self.real_dp_process_group, @@ -382,11 +453,20 @@ def clear_hp_grads(self): self.fp32_groups_has_gradients[i] = [False] * len(group) def clear_lp_grads(self): + + # using zero_() fixed memory address for graph replay + set_to_none = False if self.graph_harvesting else True + zero_grads_list = [] for group in self.bf16_groups: for param in group: - if param.grad is not None: - # Using zero_() fixed memory address for graph replay - param.grad.zero_() + if set_to_none: + param.grad = None + elif param.grad is not None: + if param.grad.grad_fn is not None: + param.grad.detach_() + zero_grads_list.append(param.grad) + if not set_to_none and len(zero_grads_list) > 0: + torch._foreach_zero_(zero_grads_list) def state_dict(self): state_dict = {} @@ -433,6 +513,7 @@ def _load_legacy_checkpoint(self, state_dict_list, load_optimizer_states=True, l self.clip_grad = current_rank_sd.get(CLIP_GRAD, self.clip_grad) if load_optimizer_states: + print(f"_load_legacy_checkpoint current_rank_sd[BASE_OPTIMIZER_STATE]") self.optimizer.load_state_dict(current_rank_sd[BASE_OPTIMIZER_STATE]) if load_from_fp32_weights: @@ -445,25 +526,16 @@ def _load_legacy_checkpoint(self, state_dict_list, load_optimizer_states=True, l self._link_all_hp_params() def _load_universal_checkpoint(self, checkpoint_folder, load_optimizer_states, load_from_fp32_weights): - self._load_hp_checkpoint_state(checkpoint_folder) + self.load_hp_checkpoint_state_from_checkpoint_dir("bf16_groups", checkpoint_folder) + + def _load_global_state(self, sd): + pass @property 
def param_groups(self): """Forward the wrapped optimizer's parameters.""" return self.optimizer.param_groups - def _load_hp_checkpoint_state(self, checkpoint_dir): - checkpoint_dir = os.path.join(checkpoint_dir, "zero") - tp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu) - tp_world_size = self.mpu.get_slice_parallel_world_size() - - for i, _ in enumerate(self.optimizer.param_groups): - for lp in self.bf16_groups[i]: - if lp._hp_mapping is not None: - #print(f"Loading {self.param_names[lp]} {tp_rank=} {tp_world_size=}") - lp.load_hp_checkpoint_state(os.path.join(checkpoint_dir, self.param_names[lp]), tp_rank, - tp_world_size) - def accumulate_hp_grads_and_remove_lp(self, lp_param, group_idx, param_idx): assert self.immediate_grad_update self._update_hp_grad(lp_param, group_idx, param_idx, clear_lp_grads=False) diff --git a/deepspeed/runtime/checkpoint_engine/torch_checkpoint_engine.py b/deepspeed/runtime/checkpoint_engine/torch_checkpoint_engine.py index 5cd44864bb2e..67544c54c67e 100644 --- a/deepspeed/runtime/checkpoint_engine/torch_checkpoint_engine.py +++ b/deepspeed/runtime/checkpoint_engine/torch_checkpoint_engine.py @@ -19,7 +19,8 @@ def create(self, tag): def save(self, state_dict, path: str): logger.info(f"[Torch] Saving {path}...") - torch.save(state_dict, path) + # TODO SW-169821: remove WA: Once SW-169817 is solved to allow numpy() HPU un-detached tensors + save(state_dict, path, pickle_protocol=4) logger.info(f"[Torch] Saved {path}.") return None @@ -32,3 +33,25 @@ def load(self, path: str, map_location=None): def commit(self, tag): logger.info(f"[Torch] Checkpoint {tag} is ready now!") return True + + +# TODO SW-169821: remove WA: Once SW-169817 is solved to allow numpy() HPU un-detached tensors +def save(data, filename, pickle_protocol=4): + + def convert_for_pickle(obj): + if isinstance(obj, torch.Size): + return obj + elif isinstance(obj, dict): + return {k: convert_for_pickle(v) for k, v in obj.items()} + elif isinstance(obj, list): + return [convert_for_pickle(e) for e in obj] + elif isinstance(obj, tuple): + return tuple([convert_for_pickle(e) for e in obj]) + else: + if isinstance(obj, torch.Tensor): + return obj.data.detach().clone().cpu() + else: + return obj + + data = convert_for_pickle(data) + torch.save(data, filename, pickle_protocol=pickle_protocol) diff --git a/deepspeed/runtime/config.py b/deepspeed/runtime/config.py index 975fb1f21501..05ba91fd797c 100755 --- a/deepspeed/runtime/config.py +++ b/deepspeed/runtime/config.py @@ -169,6 +169,12 @@ def get_bfloat16_enabled(param_dict): return False +def get_fp8_optimizer_enabled(param_dict): + if FP8_OPTIMIZER in param_dict.keys(): + return get_scalar_param(param_dict[FP8_OPTIMIZER], FP8_OPTIMIZER_ENABLED, FP8_OPTIMIZER_ENABLED_DEFAULT) + return FP8_OPTIMIZER_ENABLED_DEFAULT + + def get_bfloat16_immediate_grad_update(param_dict): for key in [BFLOAT16, BFLOAT16_OLD]: if key in param_dict.keys(): @@ -508,6 +514,10 @@ def get_zero_force_ds_cpu_optimizer(param_dict): return get_scalar_param(param_dict, ZERO_FORCE_DS_CPU_OPTIMIZER, ZERO_FORCE_DS_CPU_OPTIMIZER_DEFAULT) +def get_zero_allow_comm_data_type_fp32(param_dict): + return get_scalar_param(param_dict, ZERO_ALLOW_COMM_DATA_TYPE_FP32, ZERO_ALLOW_COMM_DATA_TYPE_FP32_DEFAULT) + + def get_scheduler_name(param_dict): if SCHEDULER in param_dict.keys() and TYPE in param_dict[SCHEDULER].keys(): return param_dict[SCHEDULER][TYPE] @@ -829,6 +839,7 @@ def _initialize_params(self, param_dict): self.bfloat16_immediate_grad_update = 
get_bfloat16_immediate_grad_update(param_dict) assert not (self.fp16_enabled and self.bfloat16_enabled), 'bfloat16 and fp16 modes cannot be simultaneously enabled' + self.fp8_optimizer_enabled = get_fp8_optimizer_enabled(param_dict) self.fp16_master_weights_and_gradients = get_fp16_master_weights_and_grads_enabled(param_dict) self.amp_enabled = get_amp_enabled(param_dict) self.amp_params = get_amp_params(param_dict) diff --git a/deepspeed/runtime/constants.py b/deepspeed/runtime/constants.py index 679230ca7d4c..c1c9e7477f12 100755 --- a/deepspeed/runtime/constants.py +++ b/deepspeed/runtime/constants.py @@ -130,6 +130,22 @@ BFLOAT16_IMMEDIATE_GRAD_UPDATE = "immediate_grad_update" BFLOAT16_IMMEDIATE_GRAD_UPDATE_DEFAULT = False +######################################### +# FP8 optimizer support +######################################### +# By default, this feature is not enabled. +# Users can configure in ds_config.json as below example: +FP8_FORMAT = ''' +FP8 parameters should be of the format: +"fp8_optimizer": { + "enabled": true +} +''' +FP8_OPTIMIZER = "fp8_optimizer" + +FP8_OPTIMIZER_ENABLED = "enabled" +FP8_OPTIMIZER_ENABLED_DEFAULT = False + ######################################### # FP16 support ######################################### diff --git a/deepspeed/runtime/engine.py b/deepspeed/runtime/engine.py index 5c1202ba06ae..e0636c489736 100644 --- a/deepspeed/runtime/engine.py +++ b/deepspeed/runtime/engine.py @@ -34,6 +34,7 @@ from deepspeed.runtime.fp16.fused_optimizer import FP16_Optimizer from deepspeed.runtime.fp16.unfused_optimizer import FP16_UnfusedOptimizer from deepspeed.runtime.bf16_optimizer import BF16_Optimizer +from deepspeed.runtime.fp8_optimizer import FP8_Optimizer from deepspeed.runtime.config import DEEPSPEED_OPTIMIZERS, \ ADAGRAD_OPTIMIZER, ADAM_OPTIMIZER, ADAMW_OPTIMIZER, LAMB_OPTIMIZER, ONEBIT_ADAM_OPTIMIZER, ONEBIT_LAMB_OPTIMIZER, \ @@ -222,6 +223,7 @@ def __init__(self, self.num_experts = [] self.gate_modules = [] self.moe_layers = [] + self.has_sequence_parallel_params = False self._step_applied = False self._global_grad_norm = None self.use_ds_comm = False # False --> Use torch.dist, True --> Use ds.comm backend. 
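For reference, the fp8_optimizer knob added above in constants.py/config.py is consumed in this file by _configure_optimizer(), which routes to the new FP8_Optimizer wrapper. The snippet below is a minimal, untested sketch of switching that path on from user code; the model, client optimizer, batch size, and the bf16 pairing are illustrative assumptions, and only the "fp8_optimizer" block mirrors the FP8_FORMAT example string from constants.py.

```python
# Illustrative sketch only (not part of this patch): enabling the fp8_optimizer path
# via a DeepSpeed config dict. Model/optimizer/batch-size values are placeholders.
import torch
import deepspeed

model = torch.nn.Linear(16, 16)
client_optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)

ds_config = {
    "train_micro_batch_size_per_gpu": 1,
    "bf16": {"enabled": True},            # assumed pairing; FP8_Optimizer builds BF16/FP32 groups
    "fp8_optimizer": {"enabled": True},   # picked up by get_fp8_optimizer_enabled()
}

# Typically launched through the deepspeed CLI (or an equivalent distributed setup).
engine, optimizer, _, _ = deepspeed.initialize(model=model,
                                               optimizer=client_optimizer,
                                               config=ds_config)
# With this config, _configure_optimizer() takes the new `elif self.fp8_optimizer:`
# branch below and wraps the client optimizer in FP8_Optimizer.
```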
@@ -239,6 +241,10 @@ def __init__(self, self._configure_with_arguments(args, mpu) self._do_sanity_check() see_memory_usage(f"DeepSpeed Engine: After args sanity test", force=self.memory_breakdown()) + + if self.fp16_enabled() and not get_accelerator().is_fp16_supported(): + raise ValueError("Type fp16 is not supported.") + if mpu is not None: if self.elasticity_enabled(): if not self.is_elastic_model_parallel_supported(): @@ -249,6 +255,7 @@ def __init__(self, dist.configure(self._config) + self.fp8_optimizer = self._config.fp8_optimizer_enabled self.monitor = MonitorMaster(self._config.monitor_config) see_memory_usage( @@ -313,6 +320,14 @@ def __init__(self, elif self.bfloat16_enabled(): self.optimizer = self._configure_bf16_optimizer(optimizer=None) + #Sequence parallel related initialization + for param in self.module.parameters(): + if getattr(param, 'sequence_parallel', False): + self.has_sequence_parallel_params = True + break + if self.has_sequence_parallel_params: + assert self.mpu is not None, "sequence parallel allreduce only supported with tensor parallel enabled" + # Hook optimizer for snip_momentum pruning if hasattr(model, 'pruners'): from ..compression.helper import rewrite_optimizer_step @@ -455,7 +470,10 @@ def get_global_grad_norm(self) -> float: Returns: float: norm """ - return self._global_grad_norm + grad_norm = self._global_grad_norm + if isinstance(grad_norm, torch.Tensor): + grad_norm = grad_norm.item() + return grad_norm def __getattr__(self, name): """ @@ -977,13 +995,13 @@ def _set_distributed_vars(self, args): device_rank = args.device_rank if args is not None and hasattr(args, 'device_rank') else self.local_rank if device_rank >= 0: get_accelerator().set_device(device_rank) - self.device = torch.device(get_accelerator().device_name(), device_rank) + self.device = torch.device(get_accelerator().device_name(device_rank)) self.world_size = dist.get_world_size() self.global_rank = dist.get_rank() else: self.world_size = 1 self.global_rank = 0 - self.device = torch.device(get_accelerator().device_name()) + self.device = get_accelerator().device() # Configure based on command line arguments def _configure_with_arguments(self, args, mpu): @@ -1254,6 +1272,8 @@ def _configure_optimizer(self, client_optimizer, model_parameters): if optimizer_wrapper == ZERO_OPTIMIZATION: self.optimizer = self._configure_zero_optimizer(basic_optimizer) + elif self.fp8_optimizer: + self.optimizer = self._configure_fp8_optimizer(basic_optimizer) elif optimizer_wrapper == AMP: amp_params = self.amp_params() log_dist(f"Initializing AMP with these params: {amp_params}", ranks=[0]) @@ -1268,7 +1288,7 @@ def _configure_optimizer(self, client_optimizer, model_parameters): else: self.optimizer = basic_optimizer - log_dist("DeepSpeed Final Optimizer = {}".format(self.optimizer_name()), ranks=[0]) + log_dist("DeepSpeed Final Optimizer = {}".format(self.optimizer.__class__.__name__), ranks=[0]) self.compression_scheduler = self._configure_compression_scheduler() self.quantizer = self._configure_quantization() @@ -1478,7 +1498,29 @@ def _configure_bf16_optimizer(self, optimizer): timers=timers, grad_acc_dtype=self.get_data_types()[1], graph_harvesting=self.graph_harvesting(), - immediate_grad_update=self._config.bfloat16_immediate_grad_update) + immediate_grad_update=self._config.bfloat16_immediate_grad_update, + has_moe_layers=self.has_moe_layers) + + return optimizer + + def _configure_fp8_optimizer(self, optimizer): + clip_grad = self.gradient_clipping() + + if optimizer is None: + optimizer = 
DummyOptim(list(self.module.parameters())) + + log_dist('Creating FP8 optimizer', ranks=[0]) + + timers = self.timers if self.wall_clock_breakdown() else NoopTimer() + optimizer = FP8_Optimizer(optimizer, + self.param_names, + mpu=self.mpu, + clip_grad=clip_grad, + allgather_bucket_size=self.zero_allgather_bucket_size(), + dp_process_group=self.seq_data_parallel_group, + timers=timers, + grad_acc_dtype=self.get_data_types()[1], + immediate_grad_update=self._config.bfloat16_immediate_grad_update) return optimizer @@ -1609,8 +1651,7 @@ def _configure_zero_optimizer(self, optimizer): communication_data_type=self.communication_data_type, zero_hpz_partition_size=self.zero_hpz_partition_size(), zero_quantized_weights=self.zero_quantized_weights(), - zero_quantized_nontrainable_weights=self.zero_quantized_nontrainable_weights(), - ) + zero_quantized_nontrainable_weights=self.zero_quantized_nontrainable_weights()) else: raise NotImplementedError("ZeRO stage {} not implemented".format(zero_stage)) @@ -1924,9 +1965,6 @@ def allreduce_gradients(self, bucket_size=MEMORY_OPT_ALLREDUCE_SIZE): self.optimizer.reduce_gradients(pipeline_parallel=self.pipeline_parallelism) else: grads = None - if hasattr(self.optimizer, "get_grads_for_reduction"): - # This is currently for BF16 optimizer - grads = self.optimizer.get_grads_for_reduction() self.buffered_allreduce_fallback(grads=grads, elements_per_buffer=bucket_size) @instrument_w_nvtx @@ -1959,7 +1997,7 @@ def backward(self, loss, allreduce_gradients=True, release_loss=False, retain_gr if self.global_rank == 0: self.summary_events = [( f"Train/Samples/train_loss", - self.losses.item(), + self.losses.item() / self.gradient_accumulation_steps(), self.global_samples, )] self.monitor.write_events(self.summary_events) @@ -2335,7 +2373,7 @@ def _report_progress(self, step): mom = self.get_mom() log_dist(f"step={step}, skipped={self.skipped_steps}, lr={lr}, mom={mom}", ranks=[0]) - def allreduce_bucket(self, bucket, dp_group): + def allreduce_bucket(self, bucket, dp_group, dp_world_size=None): tensor = self.flatten(bucket) tensor_to_allreduce = tensor @@ -2343,16 +2381,18 @@ def allreduce_bucket(self, bucket, dp_group): if self.communication_data_type != tensor.dtype: tensor_to_allreduce = tensor.to(self.communication_data_type) + if dp_world_size is None: + dp_world_size = dist.get_world_size(group=dp_group) if self.postscale_gradients(): if self.gradient_predivide_factor() != 1.0: tensor_to_allreduce.mul_(1.0 / self.gradient_predivide_factor()) dist.all_reduce(tensor_to_allreduce, group=dp_group) if self.gradient_average: - if self.gradient_predivide_factor() != dist.get_world_size(group=dp_group): - tensor_to_allreduce.mul_(self.gradient_predivide_factor() / dist.get_world_size(group=dp_group)) + if self.gradient_predivide_factor() != dp_world_size: + tensor_to_allreduce.mul_(self.gradient_predivide_factor() / dp_world_size) else: - tensor_to_allreduce.mul_(1. / dist.get_world_size(group=dp_group)) + tensor_to_allreduce.mul_(1. 
/ dp_world_size) dist.all_reduce(tensor_to_allreduce, group=dp_group) if self.communication_data_type != tensor.dtype and tensor is not tensor_to_allreduce: @@ -2360,23 +2400,23 @@ def allreduce_bucket(self, bucket, dp_group): return tensor - def allreduce_and_copy(self, small_bucket, dp_group): - allreduced = self.allreduce_bucket(small_bucket, dp_group) + def allreduce_and_copy(self, small_bucket, dp_group, dp_world_size=None): + allreduced = self.allreduce_bucket(small_bucket, dp_group, dp_world_size) for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)): buf.copy_(synced) - def allreduce_no_retain(self, bucket, dp_group, numel_per_bucket=500000000): + def allreduce_no_retain(self, bucket, dp_group, numel_per_bucket=500000000, dp_world_size=None): small_bucket = [] numel = 0 for tensor in bucket: small_bucket.append(tensor) numel = numel + tensor.numel() if numel > numel_per_bucket: - self.allreduce_and_copy(small_bucket, dp_group) + self.allreduce_and_copy(small_bucket, dp_group, dp_world_size) small_bucket = [] numel = 0 if len(small_bucket) > 0: - self.allreduce_and_copy(small_bucket, dp_group) + self.allreduce_and_copy(small_bucket, dp_group, dp_world_size) def _get_gradients_for_reduction(self): non_expert_grads = [] @@ -2427,26 +2467,35 @@ def _reduce_non_expert_gradients(self, grads, elements_per_buffer): self.allreduce_no_retain(dense_bucket, dp_group=dp_group, numel_per_bucket=elements_per_buffer) def _reduce_expert_gradients(self, expert_grads, elements_per_buffer): + # to maintain the gradients value unaffected by ep_size setting, + # utilize dp_world_size for allreduce average + dp_world_size = dist.get_world_size(groups._get_data_parallel_group()) for ep_name, expert_grads_group in expert_grads.items(): + ep_dp_group = groups._get_expert_data_parallel_group(ep_name) split_sparse_tensor_buckets, split_dense_tensor_buckets = split_half_float_double_sparse( expert_grads_group) for _, sparse_bucket_tuple in enumerate(split_sparse_tensor_buckets): if sparse_bucket_tuple: bucket_type, sparse_bucket = sparse_bucket_tuple - self.sparse_allreduce_no_retain(sparse_bucket, groups._get_expert_data_parallel_group(ep_name)) + self.sparse_allreduce_no_retain(sparse_bucket, dp_group=ep_dp_group, dp_world_size=dp_world_size) for _, dense_bucket_tuple in enumerate(split_dense_tensor_buckets): if dense_bucket_tuple: bucket_type, dense_bucket = dense_bucket_tuple # Separate between diff groups self.allreduce_no_retain(dense_bucket, - dp_group=groups._get_expert_data_parallel_group(ep_name), - numel_per_bucket=elements_per_buffer) + dp_group=ep_dp_group, + numel_per_bucket=elements_per_buffer, + dp_world_size=dp_world_size) def buffered_allreduce_fallback(self, grads=None, elements_per_buffer=500000000): if grads is None: - non_expert_grads, expert_grads = self._get_gradients_for_reduction() + if hasattr(self.optimizer, "get_grads_for_reduction"): + # This is currently for BF16 optimizer + non_expert_grads, expert_grads = self.optimizer.get_grads_for_reduction() + else: + non_expert_grads, expert_grads = self._get_gradients_for_reduction() else: assert not self.has_moe_layers, "attempting to reduce grads in unsupported way w.r.t. 
MoE" non_expert_grads = grads @@ -2456,8 +2505,19 @@ def buffered_allreduce_fallback(self, grads=None, elements_per_buffer=500000000) if self.has_moe_layers: self._reduce_expert_gradients(expert_grads, elements_per_buffer) - def sparse_allreduce_no_retain(self, bucket, dp_group): - allreduced_sparses = self.sparse_allreduce_bucket(bucket, dp_group) + if self.has_sequence_parallel_params: + for i, group in enumerate(self.optimizer.bf16_groups): + if group is None: + dtypes = self.optimizer.optimizer.param_groups[i]['group_dtypes'] + group = self.optimizer.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + for j, lp in enumerate(group): + if getattr(lp, 'sequence_parallel', False): + dist.all_reduce(self.optimizer.fp32_groups_gradients[i][j], + op=dist.ReduceOp.SUM, + group=self.mpu.get_slice_parallel_group()) + + def sparse_allreduce_no_retain(self, bucket, dp_group, dp_world_size=None): + allreduced_sparses = self.sparse_allreduce_bucket(bucket, dp_group, dp_world_size) # Densify sparse tensor and copy back to original location for tensor in allreduced_sparses: if tensor.is_sparse: @@ -2465,13 +2525,13 @@ def sparse_allreduce_no_retain(self, bucket, dp_group): else: tensor.orig_dense_tensor.copy_(tensor.to_dense()) - def sparse_allreduce_bucket(self, bucket, dp_group): + def sparse_allreduce_bucket(self, bucket, dp_group, dp_world_size=None): sparse_list = [] for sparse in bucket: - sparse_list.append(self.sparse_allreduce(sparse, dp_group)) + sparse_list.append(self.sparse_allreduce(sparse, dp_group, dp_world_size)) return sparse_list - def sparse_allreduce(self, sparse, dp_group): + def sparse_allreduce(self, sparse, dp_group, dp_world_size=None): original_data_type = sparse.values.dtype if self.communication_data_type != sparse.values.dtype: if self.communication_data_type in (torch.float16, torch.bfloat16): @@ -2483,12 +2543,13 @@ def sparse_allreduce(self, sparse, dp_group): indices = sparse.indices values = sparse.values + if dp_world_size is None: + dp_world_size = dist.get_world_size(group=dp_group) if self.postscale_gradients(): if self.gradient_average: - values.mul_(self.gradient_predivide_factor() / - (dist.get_world_size(group=dp_group) / float(self.sequence_parallel_size))) + values.mul_(self.gradient_predivide_factor() / (dp_world_size / float(self.sequence_parallel_size))) else: - values.mul_(1. / (dist.get_world_size(group=dp_group) / float(self.sequence_parallel_size))) + values.mul_(1. 
/ (dp_world_size / float(self.sequence_parallel_size))) indices_device_list = self.sparse_all_gather(indices, dp_group) values_device_list = self.sparse_all_gather(values, dp_group) @@ -2759,7 +2820,7 @@ def load_checkpoint(self, load_zero_checkpoint = load_path is not None and (self.zero_optimization() or self.bfloat16_enabled()) if load_zero_checkpoint: - if load_optimizer_states and not load_module_only: + if (load_optimizer_states and not load_module_only) or self.load_universal_checkpoint(): success = self._load_zero_checkpoint(load_dir, tag, load_optimizer_states=load_optimizer_states) else: success = False @@ -2784,8 +2845,6 @@ def load_checkpoint(self, if self.load_universal_checkpoint(): self.optimizer.update_lp_params() - if load_zero_checkpoint: - self.update_optimizer_step(step=client_states['iteration'] + 1) return load_path, client_states @@ -2821,7 +2880,6 @@ def _load_checkpoint(self, self._curr_ckpt_path = os.path.join(load_dir, tag) if self.has_moe_layers: - # print(checkpoint.keys()) old_moe_load = False if not isinstance(checkpoint['num_experts'], list): old_moe_load = True @@ -2963,24 +3021,6 @@ def _load_zero_checkpoint(self, load_dir, tag, load_optimizer_states=True): logger.info(f"loading {len(zero_sd_list)} zero partition checkpoints for rank {self.global_rank}") return True - def update_optimizer_step(self, step): - - def set_step(d): - if isinstance(d['step'], torch.Tensor): - d['step'] = torch.tensor(step, dtype=d['step'].dtype, device=d['step'].device) - else: - d['step'] = step - - optimizer = self.optimizer - base_optimizer = optimizer.optimizer - state = base_optimizer.state - for group in optimizer.param_groups: - if 'step' in group: - set_step(group) - for p in group['params']: - if p in state and len(state[p]) > 0 and 'step' in state[p]: - set_step(state[p]) - def _get_mp_rank_zero_checkpoint_names(self, load_dir, tag, mp_rank, dp_world_size, bf16_mode): zero_ckpt_names = [] for dp_rank in range(dp_world_size): @@ -3236,9 +3276,12 @@ def _save_moe_checkpoint(self, save_dir, tag, client_state={}, exclude_frozen_pa # Load flow uses below saved file for model parameters, RNG and more if groups._get_data_parallel_rank() == 0: - # get non-moe parameters + # Get non-moe parameters + # Classes DeepSpeedEngine and PipelineEngine have different behavior for method module_state_dict. + # DeepSpeedEngine returns the state dict, where PipelineEngine saves the state dict and returns None. + # We need to get the state dict, therefore, call to DeepSpeedEngine (base class for PipelineEngine) model_state_dict = self._get_non_moe_state_dict( - self.module_state_dict(exclude_frozen_parameters=exclude_frozen_parameters)) + DeepSpeedEngine.module_state_dict(self, exclude_frozen_parameters=exclude_frozen_parameters)) # TODO: update num experts info,.. 
in checkpoint state = { diff --git a/deepspeed/runtime/fp16/fused_optimizer.py b/deepspeed/runtime/fp16/fused_optimizer.py index 182f806c839c..9ed250252e17 100755 --- a/deepspeed/runtime/fp16/fused_optimizer.py +++ b/deepspeed/runtime/fp16/fused_optimizer.py @@ -10,13 +10,13 @@ import torch from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors -from deepspeed.runtime import DeepSpeedOptimizer -from deepspeed.runtime.utils import get_global_norm, get_grad_norm, CheckOverflow, get_weight_norm, required_torch_version +from deepspeed.runtime.base_optimizer import DeepSpeedOptimizer +from deepspeed.runtime.utils import get_global_norm, get_grad_norm, CheckOverflow, get_weight_norm, required_torch_version, get_norm_with_moe_layers from deepspeed.runtime.fp16.loss_scaler import INITIAL_LOSS_SCALE, SCALE_WINDOW, MIN_LOSS_SCALE -from deepspeed.utils import groups, logger, log_dist -from deepspeed import comm as dist +from deepspeed.utils import logger, log_dist from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT, CLIP_GRAD from deepspeed.accelerator import get_accelerator +from deepspeed.moe.utils import is_moe_param_group OVERFLOW_CHECK_TIMER = 'overflow_check' COMPUTE_NORM_TIMER = 'compute_norm' @@ -237,6 +237,10 @@ def step(self, closure=None): return self.overflow grads_groups_flat = [] + non_experts_grads_for_norm = [] + expert_grads_for_norm = {} + assert len(self.fp16_groups) == len(self.optimizer.param_groups) + for i, group in enumerate(self.fp16_groups): data_type = self.fp32_groups_flat[i].dtype @@ -250,15 +254,25 @@ def step(self, closure=None): p.grad = None self.fp32_groups_flat[i].grad = grads_groups_flat[i] + param_group = self.optimizer.param_groups[i] + if self.has_moe_layers and is_moe_param_group(param_group): + if param_group['name'] not in expert_grads_for_norm: + expert_grads_for_norm[param_group['name']] = [] + expert_grads_for_norm[param_group['name']].append(self.fp32_groups_flat[i]) + else: + non_experts_grads_for_norm.append(self.fp32_groups_flat[i]) self.timers(COMPUTE_NORM_TIMER).start() - all_groups_norm = get_grad_norm(self.fp32_groups_flat, mpu=self.mpu) + all_groups_norm = get_grad_norm(non_experts_grads_for_norm, mpu=self.mpu) self.timers(COMPUTE_NORM_TIMER).stop() if self.has_moe_layers: - all_groups_norm = self._get_norm_with_moe_layers(all_groups_norm) + all_groups_norm = get_norm_with_moe_layers(all_groups_norm, + mpu=self.mpu, + expert_tensors=expert_grads_for_norm, + norm_type=self.norm_type) scaled_global_grad_norm = get_global_norm(norm_list=[all_groups_norm]) @@ -290,20 +304,6 @@ def step(self, closure=None): return self.overflow - def _get_norm_with_moe_layers(self, all_groups_norm): - #all_groups_norm_old = all_groups_norm - # Need to allreduce (avg) the norms across different ranks because moe params will not be synced during allreduce - if self.using_pipeline: - pg = self.deepspeed.mpu.get_data_parallel_group() - else: - pg = groups._get_data_parallel_group() - scaled_norm = all_groups_norm * 1.0 / float(dist.get_world_size(group=pg)) - scaled_norm_tensor = torch.tensor(scaled_norm, device=self.fp32_groups_flat[0].device, dtype=torch.float) - dist.all_reduce(scaled_norm_tensor, group=pg) - all_groups_norm = scaled_norm_tensor.item() - #print(f"old = {all_groups_norm_old} and new = {all_groups_norm} at rank: {deepspeed.comm.get_rank()}") - return all_groups_norm - def unscale_and_clip_grads(self, grad_groups_flat, total_norm, apply_scale=True): # compute combined scale factor for this group combined_scale = 
self.cur_scale diff --git a/deepspeed/runtime/fp16/unfused_optimizer.py b/deepspeed/runtime/fp16/unfused_optimizer.py index 14271255df2e..a7fd1910d7b2 100755 --- a/deepspeed/runtime/fp16/unfused_optimizer.py +++ b/deepspeed/runtime/fp16/unfused_optimizer.py @@ -11,7 +11,7 @@ import torch from torch._utils import _flatten_dense_tensors -from deepspeed.runtime import DeepSpeedOptimizer +from deepspeed.runtime.base_optimizer import DeepSpeedOptimizer from deepspeed.runtime.utils import get_global_norm, CheckOverflow, get_weight_norm, required_torch_version from deepspeed.runtime.fp16.loss_scaler import INITIAL_LOSS_SCALE, SCALE_WINDOW, MIN_LOSS_SCALE from deepspeed.utils import logger diff --git a/deepspeed/runtime/fp8_optimizer.py b/deepspeed/runtime/fp8_optimizer.py new file mode 100644 index 000000000000..b8eb24cdd6ee --- /dev/null +++ b/deepspeed/runtime/fp8_optimizer.py @@ -0,0 +1,661 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from collections import OrderedDict +import torch +import sys +import os +from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors +from deepspeed import comm as dist +from deepspeed.runtime.constants import PIPE_REPLICATED +from deepspeed.runtime.base_optimizer import ZeROOptimizer +from packaging import version as pkg_version + +from deepspeed.git_version_info import version +from deepspeed.runtime.utils import (get_global_norm_of_tensors, clip_tensors_by_global_norm, DummyOptim, + align_dense_tensors, all_gather_dp_groups, bwc_tensor_model_parallel_rank, + is_model_parallel_parameter, see_memory_usage) + +from deepspeed.utils import link_hp_params, lazy_init_hp_params_optimizer_state, fragment_address +from deepspeed.checkpoint import enable_universal_checkpoint +from deepspeed.checkpoint.constants import (DS_VERSION, PARTITION_COUNT, BASE_OPTIMIZER_STATE, + SINGLE_PARTITION_OF_FP32_GROUPS, CLIP_GRAD, GROUP_PADDINGS, + PARAM_SLICE_MAPPINGS) + +setattr(sys.modules[__name__], 'fragment_address', fragment_address) + +from deepspeed.accelerator import get_accelerator + + +class FP8_Optimizer(ZeROOptimizer): + + def __init__(self, + init_optimizer, + param_names, + mpu=None, + clip_grad=0.0, + norm_type=2, + allgather_bucket_size=5000000000, + dp_process_group=None, + timers=None, + grad_acc_dtype=None, + immediate_grad_update=False): + super().__init__() + see_memory_usage('begin fp8_optimizer', force=True) + self.timers = timers + self.optimizer = init_optimizer + self.param_names = param_names + self.using_real_optimizer = not isinstance(self.optimizer, DummyOptim) + + self.clip_grad = clip_grad + self.norm_type = norm_type + self.mpu = mpu + self.allgather_bucket_size = int(allgather_bucket_size) + self.dp_process_group = dp_process_group + self.dp_rank = dist.get_rank(group=self.dp_process_group) + self.grad_acc_dtype = grad_acc_dtype + self.immediate_grad_update = immediate_grad_update + self.real_dp_process_group = [dp_process_group for i in range(len(self.optimizer.param_groups))] + + # Use torch (un)flatten ops + self.flatten = _flatten_dense_tensors + self.unflatten = _unflatten_dense_tensors + + # align nccl all-gather send buffers to 4-byte boundary + self.nccl_start_alignment_factor = 2 # 4-byte alignment/sizeof(fp16) = 2 + + # Build BF16/FP32 groups + self.bf16_groups = [] + self.bf16_groups_flat = [] + self.bf16_partitioned_groups = [] + + self.fp8_param_groups_dict = {torch.float8_e5m2: {"param_groups": [],
"param_groups_flat": [], "param_partitioned_groups": []},\ + torch.float8_e4m3fn: {"param_groups": [], "param_groups_flat": [], "param_partitioned_groups": []}} + + self.fp32_groups_flat_partition = [] + + # Maintain different fp32 gradients views for convenience + self.fp32_groups_gradients = [] + self.fp32_groups_gradient_dict = {} + self.fp32_groups_gradients_flat = [] + self.fp32_groups_actual_gradients_flat = [] + self.fp32_groups_gradient_flat_partition = [] + self.fp32_groups_has_gradients = [] + + self.group_paddings = [] + + if self.using_real_optimizer: + self._setup_for_real_optimizer() + + see_memory_usage('end fp8_optimizer', force=True) + + def _setup_for_real_optimizer(self): + dp_world_size = dist.get_world_size(group=self.dp_process_group) + self.partition_count = [dp_world_size for i in range(len(self.optimizer.param_groups))] + + for i, param_group in enumerate(self.optimizer.param_groups): + see_memory_usage(f'before initializing group {i}', force=True) + + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + + # grab the original list + trainable_parameters = [param for param in param_group['params'] if param.requires_grad] + if param_group.get('group_dtypes'): + self.bf16_groups.append(None) + self.bf16_groups_flat.append(None) + self.bf16_partitioned_groups.append(None) + self._setup_for_fp8_params(dp_world_size, i, param_group, partition_id, trainable_parameters) + else: + for dtype in self.fp8_param_groups_dict: + self.fp8_param_groups_dict[dtype]["param_groups"].append(None) + self.fp8_param_groups_dict[dtype]["param_groups_flat"].append(None) + self.fp8_param_groups_dict[dtype]["param_partitioned_groups"].append(None) + self._setup_for_bf16_params(dp_world_size, i, param_group, partition_id, trainable_parameters) + + # update optimizer param groups to reference fp32 params partition + param_group['params'] = [self.fp32_groups_flat_partition[i]] + + see_memory_usage(f'after initializing group {i}', force=True) + + see_memory_usage('before initialize_optimizer', force=True) + self.initialize_optimizer_states() + see_memory_usage('end initialize_optimizer', force=True) + + if self.immediate_grad_update: + self.create_grad_acc_hooks() + + # Need optimizer states initialized before linking lp to optimizer state + self._link_all_hp_params() + self._hp_optimizer_states_linked = False + self._enable_universal_checkpoint() + self._param_slice_mappings = self._create_param_mapping() + + def _setup_for_bf16_params(self, dp_world_size, i, param_group, partition_id, trainable_parameters): + self.bf16_groups.append(trainable_parameters) + + # create flat bf16 params + self.bf16_groups_flat.append( + self._flatten_dense_tensors_aligned(self.bf16_groups[i], self.nccl_start_alignment_factor * dp_world_size)) + + # Make bf16 params point to flat tensor storage + self._update_storage_to_flattened_tensor(tensor_list=self.bf16_groups[i], flat_tensor=self.bf16_groups_flat[i]) + + # divide flat weights into equal sized partitions + partition_size = self.bf16_groups_flat[i].numel() // dp_world_size + bf16_dp_partitions = [ + self.bf16_groups_flat[i].narrow(0, dp_index * partition_size, partition_size) + for dp_index in range(dp_world_size) + ] + self.bf16_partitioned_groups.append(bf16_dp_partitions) + + # create fp32 params partition + self.fp32_groups_flat_partition.append(bf16_dp_partitions[partition_id].clone().float().detach()) + self.fp32_groups_flat_partition[i].requires_grad = True + + num_elem_list = [t.numel() for t in self.bf16_groups[i]] + + # create fp32 
gradients + assert self.grad_acc_dtype in [torch.float32, torch.bfloat16] + self.fp32_groups_gradients_flat.append(torch.zeros_like(self.bf16_groups_flat[i], dtype=self.grad_acc_dtype)) + + # track individual fp32 gradients for entire model + fp32_gradients = self._split_flat_tensor(flat_tensor=self.fp32_groups_gradients_flat[i], + num_elem_list=num_elem_list) + self.fp32_groups_gradients.append(fp32_gradients) + self.fp32_groups_gradient_dict[i] = fp32_gradients + + # flat tensor corresponding to actual fp32 gradients (i.e., minus alignment padding) + length_without_padding = sum(num_elem_list) + self.fp32_groups_actual_gradients_flat.append( + torch.narrow(self.fp32_groups_gradients_flat[i], 0, 0, length_without_padding)) + + # flat tensor corresponding to gradient partition + self.fp32_groups_gradient_flat_partition.append( + torch.narrow(self.fp32_groups_gradients_flat[i], 0, partition_id * partition_size, partition_size)) + + # track fp32 gradient updates + self.fp32_groups_has_gradients.append([False] * len(self.bf16_groups[i])) + + # Record padding required for alignment + if partition_id == dist.get_world_size(group=self.real_dp_process_group[i]) - 1: + padding = self.bf16_groups_flat[i].numel() - length_without_padding + else: + padding = 0 + + self.group_paddings.append(padding) + + def _setup_for_fp8_params(self, dp_world_size, i, param_group, partition_id, trainable_parameters): + assert param_group.get('calculate_statistics_fn') is not None, \ + "calculate_statistics_fn wasn't provided" + dtypes = param_group['group_dtypes'] + for dtype in dtypes: + assert dtype in [torch.float8_e5m2, torch.float8_e4m3fn], \ + f'{dtype} is not supported' + calculate_statistics_fn = param_group['calculate_statistics_fn'] + calculate_statistics_fn(trainable_parameters) + + for p in trainable_parameters: + p.all_data = {} + p.all_data[dtypes[0]] = p.cast_to(dtypes[0], p.data) + for j in range(1, len(dtypes)): + self.fp8_param_groups_dict[dtypes[j]]["param_groups"].append([]) + for p in trainable_parameters: + p.all_data[dtypes[j]] = p.cast_to(dtypes[j], p.data) + self.fp8_param_groups_dict[dtypes[j]]["param_groups"][i].append(p.all_data[dtypes[j]]) + for p in trainable_parameters: + p.data = p.all_data[dtypes[0]] + self.fp8_param_groups_dict[dtypes[0]]["param_groups"].append(trainable_parameters) + + # create flat fp8 params + first_iter = True + for dtype in dtypes: + self.fp8_param_groups_dict[dtype]["param_groups_flat"].append( + self._flatten_dense_tensors_aligned(self.fp8_param_groups_dict[dtype]["param_groups"][i], + self.nccl_start_alignment_factor * dp_world_size)) + # Make fp8 params point to flat tensor storage + self._update_storage_to_flattened_tensor( + tensor_list=self.fp8_param_groups_dict[dtype]["param_groups"][i], + flat_tensor=self.fp8_param_groups_dict[dtype]["param_groups_flat"][i]) + # divide flat weights into equal sized partitions + partition_size = self.fp8_param_groups_dict[dtype]["param_groups_flat"][i].numel() // dp_world_size + fp8_dp_partitions = [ + self.fp8_param_groups_dict[dtype]["param_groups_flat"][i].narrow(0, dp_index * partition_size, + partition_size) + for dp_index in range(dp_world_size) + ] + self.fp8_param_groups_dict[dtype]["param_partitioned_groups"].append(fp8_dp_partitions) + if first_iter: + # create fp32 params partition + self.fp32_groups_flat_partition.append(fp8_dp_partitions[partition_id].clone().float().detach()) + self.fp32_groups_flat_partition[i].requires_grad = True + + num_elem_list = [t.numel() for t in 
self.fp8_param_groups_dict[dtype]["param_groups"][i]] + + # create fp32 gradients + assert self.grad_acc_dtype in [torch.float32, torch.bfloat16] + self.fp32_groups_gradients_flat.append( + torch.zeros_like(self.fp8_param_groups_dict[dtype]["param_groups_flat"][i], + dtype=self.grad_acc_dtype)) + + # track individual fp32 gradients for entire model + fp32_gradients = self._split_flat_tensor(flat_tensor=self.fp32_groups_gradients_flat[i], + num_elem_list=num_elem_list) + self.fp32_groups_gradients.append(fp32_gradients) + self.fp32_groups_gradient_dict[i] = fp32_gradients + + # flat tensor corresponding to actual fp32 gradients (i.e., minus alignment padding) + length_without_padding = sum(num_elem_list) + self.fp32_groups_actual_gradients_flat.append( + torch.narrow(self.fp32_groups_gradients_flat[i], 0, 0, length_without_padding)) + + # flat tensor corresponding to gradient partition + self.fp32_groups_gradient_flat_partition.append( + torch.narrow(self.fp32_groups_gradients_flat[i], 0, partition_id * partition_size, partition_size)) + + # track fp32 gradient updates + self.fp32_groups_has_gradients.append([False] * + len(self.fp8_param_groups_dict[dtype]["param_groups"][i])) + + # Record padding required for alignment + if partition_id == dist.get_world_size(group=self.real_dp_process_group[i]) - 1: + padding = self.fp8_param_groups_dict[dtype]["param_groups_flat"][i].numel( + ) - length_without_padding + else: + padding = 0 + + self.group_paddings.append(padding) + first_iter = False + + def _enable_universal_checkpoint(self): + for i, lp_param_group in enumerate(self.bf16_groups): + if lp_param_group is None: + dtypes = self.optimizer.param_groups[i]['group_dtypes'] + lp_param_group = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + enable_universal_checkpoint(param_list=lp_param_group) + + def _create_param_mapping(self): + param_mapping = [] + for i, param_group in enumerate(self.optimizer.param_groups): + param_mapping_per_group = OrderedDict() + if param_group.get('group_dtypes'): + dtypes = param_group['group_dtypes'] + params = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + else: + params = self.bf16_groups[i] + for lp in params: + if lp._hp_mapping is not None: + lp_name = self.param_names[lp] + param_mapping_per_group[lp_name] = lp._hp_mapping.get_hp_fragment_address() + param_mapping.append(param_mapping_per_group) + + return param_mapping + + def _link_all_hp_params(self): + dp_world_size = dist.get_world_size(group=self.dp_process_group) + for i, param_group in enumerate(self.optimizer.param_groups): + # Link bf16 and fp32 params in partition + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + flat_hp_partition = self.fp32_groups_flat_partition[i] + if param_group.get('group_dtypes'): + dtypes = param_group.get('group_dtypes') + partition_size = self.fp8_param_groups_dict[dtypes[0]]["param_groups_flat"][i].numel() // dp_world_size + lp_param_list = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + else: + partition_size = self.bf16_groups_flat[i].numel() // dp_world_size + lp_param_list = self.bf16_groups[i] + link_hp_params(lp_param_list=lp_param_list, + flat_hp_partition=flat_hp_partition, + gradient_dict=self.fp32_groups_gradient_dict, + offload_gradient_dict=None, + use_offload=False, + param_group_index=i, + partition_start=partition_id * partition_size, + partition_size=partition_size, + dp_group=self.real_dp_process_group[i]) + + def _lazy_init_hp_params_optimizer_state(self): + if not self._hp_optimizer_states_linked: 
+ for i, param_group in enumerate(self.optimizer.param_groups): + if param_group.get('group_dtypes'): + dtypes = param_group.get('group_dtypes') + params = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + else: + params = self.bf16_groups[i] + lazy_init_hp_params_optimizer_state(params, self.fp32_groups_flat_partition[i], self.optimizer.state) + self._hp_optimizer_states_linked = True + + def initialize_optimizer_states(self): + """Take an optimizer step with zero-valued gradients to allocate internal + optimizer state. + + This helps prevent memory fragmentation by allocating optimizer state at the + beginning of training instead of after activations have been allocated. + """ + for param_partition, grad_partition in zip(self.fp32_groups_flat_partition, + self.fp32_groups_gradient_flat_partition): + if self.grad_acc_dtype is torch.bfloat16: + param_partition.grad = grad_partition.to(param_partition.dtype) + else: + param_partition.grad = grad_partition + + self.optimizer.step() + + if self.grad_acc_dtype is torch.bfloat16: + for param_partition in self.fp32_groups_flat_partition: + param_partition.grad = None + + self.clear_hp_grads() + + def _split_flat_tensor(self, flat_tensor, num_elem_list): + assert sum(num_elem_list) <= flat_tensor.numel() + tensor_list = [] + offset = 0 + for num_elem in num_elem_list: + dense_tensor = torch.narrow(flat_tensor, 0, offset, num_elem) + tensor_list.append(dense_tensor) + offset += num_elem + + return tensor_list + + def _update_storage_to_flattened_tensor(self, tensor_list, flat_tensor): + updated_params = self.unflatten(flat_tensor, tensor_list) + # TODO: SW-179781 need to remove the below WA once SW-179780 is resolved + get_accelerator().synchronize() + for p, q in zip(tensor_list, updated_params): + p.data = q.data + if hasattr(p, 'all_data'): + p.all_data[p.dtype] = q.data + + def _flatten_dense_tensors_aligned(self, tensor_list, alignment): + return self.flatten(align_dense_tensors(tensor_list, alignment)) + + @torch.no_grad() + def step(self, closure=None): + if closure is not None: + raise NotImplementedError(f'{self.__class__} does not support closure.') + + all_groups_norm = get_global_norm_of_tensors(input_tensors=self.get_grads_for_norm(), + mpu=self.mpu, + norm_type=self.norm_type) + self._global_grad_norm = all_groups_norm + + assert all_groups_norm > 0. + if self.clip_grad > 0.: + clip_tensors_by_global_norm(input_tensors=self.get_grads_for_norm(for_clipping=True), + max_norm=self.clip_grad, + global_norm=all_groups_norm, + mpu=self.mpu) + + for param_partition, grad_partition in zip(self.fp32_groups_flat_partition, + self.fp32_groups_gradient_flat_partition): + if self.grad_acc_dtype is torch.bfloat16: + param_partition.grad = grad_partition.to(param_partition.dtype) + else: + param_partition.grad = grad_partition + + self.optimizer.step() + + if self.grad_acc_dtype is torch.bfloat16: + for param_partition in self.fp32_groups_flat_partition: + param_partition.grad = None + + # We need to link optimizer state after the first step() call + self._lazy_init_hp_params_optimizer_state() + + self.update_lp_params() + + self.clear_hp_grads() + + def backward(self, loss, update_hp_grads=True, clear_lp_grads=False, **bwd_kwargs): + """Perform a backward pass and copy the low-precision gradients to the + high-precision copy. 
+ + If self.immediate_grad_update is false and update_hp_grads is true we copy/accumulate to the high-precision grads now + to prevent accumulating in the bf16 grads after successive backward() calls (i.e., grad accumulation steps > 1) + + The low-precision grads are deallocated during this procedure. + """ + self.clear_lp_grads() + loss.backward(**bwd_kwargs) + + if not self.immediate_grad_update and update_hp_grads: + self.update_hp_grads(clear_lp_grads=clear_lp_grads) + + @torch.no_grad() + def update_hp_grad(self, lp, group_idx, param_idx, clear_lp_grads): + if lp.grad is None: + return + + hp_grad = self.fp32_groups_gradients[group_idx][param_idx] + assert hp_grad is not None, \ + f'high precision param has no gradient, lp param_id = {id(lp)} group_info = [{group_idx}][{param_idx}]' + + if hasattr(lp, 'hp_grad'): + grad = lp.hp_grad + else: + grad = lp.grad + hp_grad.data.add_(grad.data.to(hp_grad.dtype).view(hp_grad.shape)) + lp._hp_grad = hp_grad + self.fp32_groups_has_gradients[group_idx][param_idx] = True + + # clear gradients + lp.grad = None + if hasattr(lp, 'hp_grad'): + lp.hp_grad = None + + @torch.no_grad() + def update_hp_grads(self, clear_lp_grads=False): + if self.immediate_grad_update: + return + for i, group in enumerate(self.bf16_groups): + if group is None: + dtypes = self.optimizer.param_groups[i]['group_dtypes'] + group = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + for j, lp in enumerate(group): + self.update_hp_grad(lp, i, j, clear_lp_grads) + + @torch.no_grad() + def get_grads_for_reduction(self): + return self.fp32_groups_gradients_flat, {} + + @torch.no_grad() + def get_grads_for_norm(self, for_clipping=False): + grads = [] + tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu) + for i, group in enumerate(self.bf16_groups): + if group is None: + dtypes = self.optimizer.param_groups[i]['group_dtypes'] + group = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + for j, lp in enumerate(group): + if not for_clipping: + if hasattr(lp, PIPE_REPLICATED) and lp.ds_pipe_replicated: + continue + + if not (tensor_mp_rank == 0 or is_model_parallel_parameter(lp)): + continue + + if not self.fp32_groups_has_gradients[i][j]: + continue + + grads.append(self.fp32_groups_gradients[i][j]) + return grads + + @torch.no_grad() + def update_lp_params(self): + dtypes = [] + for i, (partition_group, + fp32_partition) in enumerate(zip(self.bf16_partitioned_groups, self.fp32_groups_flat_partition)): + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + if partition_group is None: + calculate_statistics_fn = self.optimizer.param_groups[i]['calculate_statistics_fn'] + dtypes = self.optimizer.param_groups[i]['group_dtypes'] + params = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + calculate_statistics_fn(params) + for p in params: + if p._hp_mapping is not None: + for dtype in dtypes: + fp8_partition_data_temp = p.cast_from(p._hp_mapping.hp_fragment.data, dtype) + p.all_data[dtype].flatten().narrow( + 0, p._hp_mapping.lp_fragment_address.start, + p._hp_mapping.lp_fragment_address.numel).copy_(fp8_partition_data_temp) + else: + partition_group[partition_id].data.copy_(fp32_partition.data) + # print_rank_0(f'update_lp_params {i=} {partition_id=}', force=True) + # if i == 0: + # print_rank_0(f'{fp32_partition[:10]=}', force=True) + + #TODO: SW-90304 call all_gather_dp_groups with async_op=true if zero optimizer hpu_use_async_collectives is enabled + if any(g is not None for g in self.bf16_groups_flat): + 
all_gather_dp_groups(groups_flat=self.bf16_groups_flat, + partitioned_param_groups=self.bf16_partitioned_groups, + dp_process_group=self.real_dp_process_group, + start_alignment_factor=self.nccl_start_alignment_factor, + allgather_bucket_size=self.allgather_bucket_size) + for dtype in dtypes: + param_groups_flat = self.fp8_param_groups_dict[dtype]["param_groups_flat"] + param_partitioned_groups = self.fp8_param_groups_dict[dtype]["param_partitioned_groups"] + if any(g is not None for g in param_groups_flat): + all_gather_dp_groups(groups_flat=param_groups_flat, + partitioned_param_groups=param_partitioned_groups, + dp_process_group=self.real_dp_process_group, + start_alignment_factor=self.nccl_start_alignment_factor, + allgather_bucket_size=self.allgather_bucket_size) + + def clear_hp_grads(self): + for flat_gradients in self.fp32_groups_gradients_flat: + flat_gradients.zero_() + + for i, group in enumerate(self.fp32_groups_gradients): + self.fp32_groups_has_gradients[i] = [False] * len(group) + + def clear_lp_grads(self): + for i, group in enumerate(self.bf16_groups): + if group is None: + dtypes = self.optimizer.param_groups[i]['group_dtypes'] + group = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + for param in group: + param.grad = None + if hasattr(param, 'hp_grad'): + param.hp_grad = None + + def state_dict(self): + state_dict = {} + state_dict[CLIP_GRAD] = self.clip_grad + state_dict[BASE_OPTIMIZER_STATE] = self.optimizer.state_dict() + state_dict[SINGLE_PARTITION_OF_FP32_GROUPS] = self.fp32_groups_flat_partition + state_dict[GROUP_PADDINGS] = self.group_paddings + state_dict[PARTITION_COUNT] = self.partition_count + state_dict[DS_VERSION] = version + state_dict[PARAM_SLICE_MAPPINGS] = self._param_slice_mappings + + return state_dict + + # Restore base optimizer fp32 weights from bfloat16 weights + def _restore_from_bit16_weights(self): + for i, group in enumerate(self.bf16_groups): + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + for j, (partitioned_group, + fp32_partition) in enumerate(zip(self.bf16_partitioned_groups, self.fp32_groups_flat_partition)): + if partitioned_group is None: + dtypes = self.optimizer.param_groups[j]['group_dtypes'] + partitioned_group = self.fp8_param_groups_dict[dtypes[0]]["param_partitioned_groups"][j] + fp32_partition.data.copy_(partitioned_group[partition_id].data) + + def refresh_fp32_params(self): + self._restore_from_bit16_weights() + + def load_state_dict(self, + state_dict_list, + checkpoint_folder, + load_optimizer_states=True, + load_from_fp32_weights=False, + load_serial=None): + if checkpoint_folder: + self._load_universal_checkpoint(checkpoint_folder, load_optimizer_states, load_from_fp32_weights) + else: + self._load_legacy_checkpoint(state_dict_list, load_optimizer_states, load_from_fp32_weights) + + def _load_legacy_checkpoint(self, state_dict_list, load_optimizer_states=True, load_from_fp32_weights=False): + + dp_rank = dist.get_rank(group=self.dp_process_group) + current_rank_sd = state_dict_list[dp_rank] + + ckpt_version = current_rank_sd.get(DS_VERSION, False) + assert ckpt_version, f"Empty ds_version in checkpoint, not clear how to proceed" + ckpt_version = pkg_version.parse(ckpt_version) + + self.clip_grad = current_rank_sd.get(CLIP_GRAD, self.clip_grad) + + if load_optimizer_states: + self.optimizer.load_state_dict(current_rank_sd[BASE_OPTIMIZER_STATE]) + + if load_from_fp32_weights: + for current, saved in zip(self.fp32_groups_flat_partition, + current_rank_sd[SINGLE_PARTITION_OF_FP32_GROUPS]): + 
src_tensor = _get_padded_tensor(saved, current.numel()) + current.data.copy_(src_tensor.data) + + if load_optimizer_states: + self._link_all_hp_params() + + def _load_universal_checkpoint(self, checkpoint_folder, load_optimizer_states, load_from_fp32_weights): + self._load_hp_checkpoint_state(checkpoint_folder) + + @property + def param_groups(self): + """Forward the wrapped optimizer's parameters.""" + return self.optimizer.param_groups + + def _load_hp_checkpoint_state(self, checkpoint_dir): + checkpoint_dir = os.path.join(checkpoint_dir, "zero") + tp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu) + tp_world_size = self.mpu.get_slice_parallel_world_size() + + for i, param_group in enumerate(self.optimizer.param_groups): + if param_group.get('group_dtypes'): + dtypes = param_group['group_dtypes'] + group = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + else: + group = self.bf16_groups[i] + for lp in group: + if lp._hp_mapping is not None: + #print(f"Loading {self.param_names[lp]} {tp_rank=} {tp_world_size=}") + lp.load_hp_checkpoint_state(os.path.join(checkpoint_dir, self.param_names[lp]), tp_rank, + tp_world_size) + + def accumulate_hp_grads_and_remove_lp(self, lp_param, group_idx, param_idx): + assert self.immediate_grad_update + self.update_hp_grad(lp_param, group_idx, param_idx, clear_lp_grads=False) + + def create_grad_acc_hooks(self): + self.grad_accs = [] + for i, param_group in enumerate(self.bf16_groups): + if param_group is None: + dtypes = self.optimizer.param_groups[i]['group_dtypes'] + param_group = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + for j, param in enumerate(param_group): + if param.requires_grad: + + def wrapper(param, i, j): + param_tmp = param.expand_as(param) + grad_acc = param_tmp.grad_fn.next_functions[0][0] + + def accumulate_hp_grads_and_remove_lp(*notneeded): + self.accumulate_hp_grads_and_remove_lp(param, i, j) + + grad_acc.register_hook(accumulate_hp_grads_and_remove_lp) + self.grad_accs.append(grad_acc) + + wrapper(param, i, j) + + +def _get_padded_tensor(src_tensor, size): + if src_tensor.numel() >= size: + return src_tensor + padded_tensor = torch.zeros(size, dtype=src_tensor.dtype, device=src_tensor.device) + slice_tensor = torch.narrow(padded_tensor, 0, 0, src_tensor.numel()) + slice_tensor.data.copy_(src_tensor.data) + return padded_tensor diff --git a/deepspeed/runtime/hpu_utils.py b/deepspeed/runtime/hpu_utils.py new file mode 100644 index 000000000000..5a7ca4d4b97f --- /dev/null +++ b/deepspeed/runtime/hpu_utils.py @@ -0,0 +1,11 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed.accelerator import get_accelerator + + +def get_use_hpu(): + return get_accelerator().device_name() == "hpu" diff --git a/deepspeed/runtime/hybrid_engine.py b/deepspeed/runtime/hybrid_engine.py index a991c4304563..8a6311bb6e83 100644 --- a/deepspeed/runtime/hybrid_engine.py +++ b/deepspeed/runtime/hybrid_engine.py @@ -17,16 +17,14 @@ from deepspeed.accelerator import get_accelerator from torch import nn from deepspeed.utils import logger - -from deepspeed.ops.op_builder import InferenceBuilder - from deepspeed.module_inject.layers import LinearLayer, Normalize, EmbeddingLayer, OPTEmbedding +from ..ops.transformer.inference.op_binding.workspace import WorkspaceOp + try: import transformers OPTLearnedPositionalEmbedding = transformers.models.opt.modeling_opt.OPTLearnedPositionalEmbedding except: OPTLearnedPositionalEmbedding = None -inference_cuda_module = None class DeepSpeedHybridEngine(DeepSpeedEngine): @@ -61,12 +59,8 @@ def __init__(self, args, model, **kwargs): self._total_batch_size = None self._gather_latency = 0 - global inference_cuda_module - if inference_cuda_module is None: - builder = InferenceBuilder() - inference_cuda_module = builder.load() - self.is_lora_fused = False + self.workspace = WorkspaceOp() def convert_to_linear_transposed(self, model): @@ -160,13 +154,13 @@ def unfuse_lora_weight_non_pinned(self): def retake_inference_cache(self): if self._config.hybrid_engine.release_inference_cache: - retake_success = inference_cuda_module.retake_workspace() + retake_success = self.workspace.retake_workspace() if not retake_success: logger.warning("Unable to acquire workspace on first attempt, emptying cache and retrying.") gc.collect() get_accelerator().empty_cache() - retake_success = inference_cuda_module.retake_workspace() + retake_success = self.workspace.retake_workspace() if not retake_success: raise RuntimeError("Unable to retake inference workspace.") @@ -269,7 +263,7 @@ def generate(self, *inputs, **kwargs): self.is_lora_fused = False if self._config.hybrid_engine.release_inference_cache: - inference_cuda_module.release_workspace() + self.workspace.release_workspace() gc.collect() get_accelerator().empty_cache() diff --git a/deepspeed/runtime/pipe/engine.py b/deepspeed/runtime/pipe/engine.py index ef1c98a95c7b..5e0ef63a41ae 100644 --- a/deepspeed/runtime/pipe/engine.py +++ b/deepspeed/runtime/pipe/engine.py @@ -4,6 +4,7 @@ # DeepSpeed Team from types import MethodType +from collections import OrderedDict import torch from deepspeed import comm as dist @@ -151,11 +152,33 @@ def __init__(self, has_bool_tensors=False, *super_args, **super_kwargs): if self.global_rank != min(d['ranks']): tied_params += sum(p.numel() for p in d['module'].parameters()) unique_params -= tied_params - params_tensor = torch.LongTensor(data=[num_params, unique_params]).to(self.device) + + # Use Int32 representation instead of Int64 for calculations. + # num_params division & modulo after the all-reduce should be lower than MAX Int32. + # Using this value is safe with fewer than ~2000 devices.
+ # Int32Max > all_reduce_group*chunk_size + chunk_size = 10**6 + + num_params_quotient = num_params // chunk_size + num_params_remainder = num_params % chunk_size + + unique_params_quotient = unique_params // chunk_size + unique_params_remainder = unique_params % chunk_size + + assert (unique_params_quotient * chunk_size + + unique_params_remainder) == unique_params, "Value mismatch after Int64 splitting" + assert (num_params_quotient * chunk_size + + num_params_remainder) == num_params, "Value mismatch after Int64 splitting" + + params_tensor = torch.IntTensor( + data=[num_params_quotient, num_params_remainder, unique_params_quotient, unique_params_remainder]).to( + self.device) + dist.all_reduce(params_tensor, group=self.grid.get_model_parallel_group()) params_tensor = params_tensor.tolist() - total_params = params_tensor[0] - unique_params = params_tensor[1] + total_params = params_tensor[0] * chunk_size + params_tensor[1] + unique_params = params_tensor[2] * chunk_size + params_tensor[3] + if self.grid.data_parallel_id == 0: logger.info(f'RANK={self.global_rank} ' f'STAGE={self.stage_id} ' @@ -189,14 +212,25 @@ def __init__(self, has_bool_tensors=False, *super_args, **super_kwargs): self.pipe_partition_grad_meta_cache = None self.grad_partition_grad_layer_meta_cache = None + self.pipe_partition_input_meta_cache = None + self.pipe_partition_output_meta_cache = None + self.pipe_partition_grad_meta_cache = None + self.grad_partition_grad_layer_meta_cache = None + #stores the loss for the current micro batch being processed self.loss = torch.tensor(0.0).to(self.device) #stores the loss for the entire batch self.total_loss = None + self.total_additional_losses = None self.agg_loss = torch.tensor(0.0, requires_grad=False).to(self.device) self.dp_group_loss = torch.tensor(0.0, requires_grad=False).to(self.device) + # stores aggregated-DP train final loss and aggregated-DP additional losses, if any + # additional losses are stored as dict: {loss-name: agg-loss} + self.agg_train_loss = None + self.agg_additional_losses = None + if self._config.pipeline['activation_checkpoint_interval'] > 0: self.module.activation_checkpoint_interval = self._config.pipeline['activation_checkpoint_interval'] # set use_reentrant default to True. @@ -284,10 +318,7 @@ def _exec_reduce_grads(self): self._force_grad_boundary = False def _bf16_reduce_grads(self): - # Make our own list of gradients from the optimizer's FP32 grads - grads = [] - self.buffered_allreduce_fallback(grads=self.optimizer.get_grads_for_reduction(), - elements_per_buffer=MEMORY_OPT_ALLREDUCE_SIZE) + self.buffered_allreduce_fallback(grads=None, elements_per_buffer=MEMORY_OPT_ALLREDUCE_SIZE) def _reserve_pipe_buffers(self, num_buffers): """Ensure that each pipeline buffer has at least ``num_buffers`` slots. 
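Editor's note: the PipelineEngine hunk above replaces the single int64 parameter-count all-reduce with int32 quotient/remainder pairs. The snippet below is a minimal, standalone sketch of that trick, assuming torch.distributed is already initialized (e.g. with the gloo backend); the helper name allreduce_param_count and its packaging are illustrative only and not part of this patch, which packs num_params and unique_params together into one 4-element IntTensor on self.device.

import torch
import torch.distributed as dist


def allreduce_param_count(num_params: int, group=None, chunk_size: int = 10**6) -> int:
    # Split into quotient/remainder so every summed component stays below the int32 maximum;
    # this holds while world_size * chunk_size < 2**31, i.e. for fewer than ~2000 ranks.
    quotient, remainder = divmod(num_params, chunk_size)
    counts = torch.tensor([quotient, remainder], dtype=torch.int32)
    dist.all_reduce(counts, group=group)  # element-wise sum across ranks
    summed_quotient, summed_remainder = counts.tolist()
    # sum_i (q_i * chunk_size + r_i) == (sum_i q_i) * chunk_size + (sum_i r_i)
    return summed_quotient * chunk_size + summed_remainder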
@@ -363,6 +394,7 @@ def train_batch(self, data_iter=None): self.module.train() self.total_loss = None + self.total_additional_losses = None self._compute_loss = True # Do the work @@ -371,7 +403,9 @@ def train_batch(self, data_iter=None): stages=self.num_stages, stage_id=self.stage_id) self._exec_schedule(sched) - self.agg_train_loss = self._aggregate_total_loss() + + with torch.no_grad(): + self.agg_train_loss = self._aggregate_total_loss() self.timers(TRAIN_BATCH_TIMER).stop() @@ -380,10 +414,12 @@ def train_batch(self, data_iter=None): elapsed = self.timers(TRAIN_BATCH_TIMER).elapsed(reset=True) / 1000.0 iter_time = elapsed / self.steps_per_print() tput = self.train_batch_size() / iter_time - print(f'steps: {self.global_steps} ' - f'loss: {self.agg_train_loss:0.4f} ' - f'iter time (s): {iter_time:0.3f} ' - f'samples/sec: {tput:0.3f}') + log_str = f'steps: {self.global_steps} loss: {self.agg_train_loss:0.4f} ' + if self.agg_additional_losses is not None: + for loss_name, loss_value in self.agg_additional_losses.items(): + log_str += f'{loss_name}: {loss_value.item():0.4f} ' + log_str += f'iter time (s): {iter_time:0.3f} samples/sec: {tput:0.3f}' + print(log_str) else: self.timers(TRAIN_BATCH_TIMER).elapsed(reset=True) @@ -463,12 +499,11 @@ def eval_batch(self, micro_batches = self.micro_batches if num_micro_batches is None else num_micro_batches # Do the work - sched = schedule.InferenceSchedule(micro_batches=self.micro_batches, - stages=self.num_stages, - stage_id=self.stage_id) + sched = schedule.InferenceSchedule(micro_batches=micro_batches, stages=self.num_stages, stage_id=self.stage_id) # prevent dead-lock with multiple evals sequence - dist.barrier() + if not get_accelerator().device_name() == "hpu": + dist.barrier() with torch.no_grad(): self._exec_schedule(sched) @@ -565,29 +600,66 @@ def _bcast_pipe_scalar(self, data, src_rank=None, dtype=torch.float32): def _aggregate_total_loss(self): # Scale loss, average among DP ranks, and bcast loss to the rest of my DP group if self.is_last_stage(): + # Scale loss and additional losses, if any loss = self._scale_loss_by_gas(self.total_loss) - self.dp_group_loss = loss.clone().detach() + self.agg_additional_losses = self.total_additional_losses + if self.agg_additional_losses is not None: + self.agg_additional_losses = OrderedDict({ + loss_name: self._scale_loss_by_gas(_loss.clone().detach()) + for loss_name, _loss in self.agg_additional_losses.items() + }) - ## Average loss across all data-parallel groups + self.dp_group_loss = loss.clone().detach() agg_loss = self.dp_group_loss.clone().detach() #print(f'RANK={self.global_rank} bcast SENDER src={self.global_rank} group={self.grid.pp_group}', flush=True) + + # Average loss across all data-parallel groups if self.is_data_parallel: - dist.all_reduce(agg_loss, group=self.mpu.get_data_parallel_group()) - agg_loss /= self.dp_world_size + if self.agg_additional_losses is None: + dist.all_reduce(agg_loss, group=self.mpu.get_data_parallel_group()) + agg_loss /= self.dp_world_size + else: + # use a single reduce op for agg_loss and additional losses, if any + assert '__train_loss__' not in self.agg_additional_losses.keys() + tensors = OrderedDict({'__train_loss__': agg_loss}) + tensors.update(self.agg_additional_losses.items()) + flat_tensor = torch.cat([t.clone().reshape(-1).detach() for t in tensors.values()]) + dist.all_reduce(flat_tensor, group=self.mpu.get_data_parallel_group()) + flat_tensor /= self.dp_world_size + offset = 0 + reduced_tensor = {} + for name, t in tensors.items(): + n_elem = 
t.numel() + reduced_tensor[name] = flat_tensor[offset:offset + n_elem].clone().detach().reshape(t.shape) + offset += n_elem + agg_loss = reduced_tensor['__train_loss__'] + self.agg_additional_losses = OrderedDict( + {name: reduced_tensor[name] + for name in self.agg_additional_losses.keys()}) assert self.global_rank in self.grid.pp_group - losses = torch.stack([self.dp_group_loss, agg_loss]).float() + losses = [self.dp_group_loss, agg_loss] + if self.agg_additional_losses is not None: + losses += list(self.agg_additional_losses.values()) + losses = torch.stack(losses).float() if self.is_pipe_parallel: dist.broadcast(tensor=losses, src=self.global_rank, group=self.mpu.get_pipe_parallel_group()) else: # Get loss from last stage src_rank = self.grid.stage_to_global(self.num_stages - 1) assert src_rank in self.grid.pp_group - losses = torch.Tensor([0., 0.]).to(self.device) + # losses to reduce are: dp_group_loss, agg_loss, model additional losses + # therefore: 2 + n_additional_losses + additional_losses = self.module.get_additional_losses() + n_additional_losses = 0 if additional_losses is None else len(additional_losses) + losses = torch.Tensor([0.] * (2 + n_additional_losses)).to(self.device) dist.broadcast(tensor=losses, src=src_rank, group=self.grid.get_pipe_parallel_group()) self.dp_group_loss = losses[0].clone().detach() agg_loss = losses[1].clone().detach() - + if additional_losses is not None: + self.agg_additional_losses = OrderedDict( + {name: losses[2 + i].clone().detach() + for i, name in enumerate(additional_losses.keys())}) return agg_loss def set_dataloader(self, loader): @@ -665,7 +737,8 @@ def _exec_forward_pass(self, buffer_id): self.pipe_partition_input_meta_cache = inputs[0].to('cpu') part_input = PartitionedTensor.from_meta(meta=self.pipe_partition_input_meta_cache, local_part=inputs[1], - group=self.grid.get_slice_parallel_group()) + group=self.grid.get_slice_parallel_group(), + device=inputs[0].device) inputs = (part_input.full(), *inputs[2:]) inputs[0].requires_grad = True @@ -697,7 +770,7 @@ def _exec_forward_pass(self, buffer_id): raise ValueError("expecting a tensor or a tuple of tensors") part = PartitionedTensor(tensor=first_output, group=self.grid.get_slice_parallel_group()) # Clear the large output data, but save the computation graph - first_output.data = torch.zeros(1) + first_output.data = torch.zeros(1, device=first_output.data.device) self.pipe_buffers['output_tensors'][buffer_id] = first_output # Inject the partitioned tensor into the output before sending outputs = (part.to_meta(), part.data(), *outputs_tail) @@ -715,19 +788,34 @@ def _exec_forward_pass(self, buffer_id): self.loss = outputs if self.eval_return_logits: self.outputs = outputs + if isinstance(self.loss, torch.Tensor): self.fwd_outputs.append(self.loss.detach()) - - if self.total_loss is None: - self.total_loss = torch.zeros_like(self.loss) - self.total_loss += self.loss.detach() else: self.fwd_outputs.append([l.detach() for l in self.loss]) - if self.total_loss is None: - self.total_loss = [torch.zeros_like(l) for l in self.loss] - for idx, l in enumerate(self.loss): - self.total_loss[idx] += l.detach() + def add_to_total_loss(_total_loss, _loss): + if isinstance(_loss, torch.Tensor): + if _total_loss is None: + _total_loss = torch.zeros_like(_loss) + _total_loss += _loss.detach() + else: + if _total_loss is None: + _total_loss = [torch.zeros_like(_l) for _l in _loss] + for _idx, _l in enumerate(_loss): + _total_loss[_idx] += _l.detach() + return _total_loss + + self.total_loss = 
add_to_total_loss(self.total_loss, self.loss) + + # aggregate additional losses across gradient accumulation steps + additional_losses = self.module.get_additional_losses() + if additional_losses is not None: + if self.total_additional_losses is None: + self.total_additional_losses = OrderedDict() + for name, loss in additional_losses.items(): + total = self.total_additional_losses[name] if name in self.total_additional_losses else None + self.total_additional_losses[name] = add_to_total_loss(total, loss) def _exec_backward_pass(self, buffer_id): assert self.optimizer is not None, "must provide optimizer during " \ @@ -758,7 +846,8 @@ def _exec_backward_pass(self, buffer_id): self.pipe_partition_output_meta_cache = outputs[0].to('cpu') part_output = PartitionedTensor.from_meta(meta=self.pipe_partition_output_meta_cache, local_part=outputs[1], - group=self.grid.get_slice_parallel_group()) + group=self.grid.get_slice_parallel_group(), + device=outputs[0].device) self.pipe_buffers['output_tensors'][buffer_id].data = part_output.full() outputs = (self.pipe_buffers['output_tensors'][buffer_id], *outputs[2:]) else: @@ -773,7 +862,8 @@ def _exec_backward_pass(self, buffer_id): self.grad_partition_grad_layer_meta_cache = self.grad_layer[0].to('cpu') part_grad = PartitionedTensor.from_meta(meta=self.grad_partition_grad_layer_meta_cache, local_part=self.grad_layer[1], - group=self.grid.get_slice_parallel_group()) + group=self.grid.get_slice_parallel_group(), + device=self.grad_layer[0].device) grad_tensors = (part_grad.full(), *grad_tensors[2:]) part_grad = None #print(f'RANK={self.global_rank} BEFORE-BWD restored grad={self.grad_layer[0].size()} {self.grad_layer[1].size()}') @@ -792,7 +882,8 @@ def _exec_backward_pass(self, buffer_id): if self.using_bf16_optimizer and not self.is_last_stage(): # manually call because we don't call optimizer.backward() - self.optimizer.update_hp_grads(clear_lp_grads=False) + if not self._config.bfloat16_immediate_grad_update: + self.optimizer.update_hp_grads(clear_lp_grads=False) # Free up the memory from the output of forward() self.pipe_buffers['output_tensors'][buffer_id] = None @@ -1089,6 +1180,10 @@ def _exec_recv_activations(self, buffer_id): buffer = self.meta_buffer p2p.recv(buffer, self.prev_stage) + + # Performing the clones in a different loop to reduce host dependency, + # and improve performance. + for idx, buffer in enumerate(self.pipe_recv_buf): recvd[idx] = buffer.clone().detach() # NCCL does not like to send torch.BoolTensor types, so un-cast the @@ -1118,7 +1213,8 @@ def _exec_recv_grads(self, buffer_id): self.pipe_partition_grad_meta_cache = outputs[0].to('cpu') part_output = PartitionedTensor.from_meta(meta=self.pipe_partition_grad_meta_cache, local_part=outputs[1], - group=self.grid.get_slice_parallel_group()) + group=self.grid.get_slice_parallel_group(), + device=outputs[0].device) outputs[0].data = part_output.full() outputs = (outputs[0], *outputs[2:]) # save for backward @@ -1332,7 +1428,7 @@ def load_module_state_dict(self, checkpoint, strict=True, custom_load_fn=None, f strict (bool, optional): Strict state loading. Defaults to True. """ assert custom_load_fn is None, "custom_load_fn not supported w. 
pipeline parallelism" - state_dict = checkpoint['module'] + state_dict = checkpoint if self.has_moe_layers else checkpoint['module'] if (state_dict is not None) and (not isinstance(state_dict, str)): super().load_module_state_dict(state_dict, strict) return @@ -1371,3 +1467,6 @@ def _exec_schedule(self, pipe_schedule): # Equivalent to: self._exec_forward_pass(buffer_id=0) self._exec_instr = MethodType(self._INSTRUCTION_MAP[type(cmd)], self) self._exec_instr(**cmd.kwargs) + + def get_additional_losses(self): + return self.agg_additional_losses diff --git a/deepspeed/runtime/pipe/module.py b/deepspeed/runtime/pipe/module.py index c11379b0a0d7..8036faef72ee 100644 --- a/deepspeed/runtime/pipe/module.py +++ b/deepspeed/runtime/pipe/module.py @@ -634,3 +634,10 @@ def _is_checkpointable(self, funcs): return all(f.__class__.__name__ in self.checkpointable_layers for f in funcs) params = [f.parameters() for f in funcs if isinstance(f, torch.nn.Module)] return any(len(list(p)) > 0 for p in params) + + def get_additional_losses(self): + """ Returns model specific additional losses for reporting + + Return a dictionary of {"loss name": loss_value} or None if no additional losses. + """ + return None diff --git a/deepspeed/runtime/state_dict_factory.py b/deepspeed/runtime/state_dict_factory.py index c2db85d1ba58..469e2fe9f55f 100755 --- a/deepspeed/runtime/state_dict_factory.py +++ b/deepspeed/runtime/state_dict_factory.py @@ -335,10 +335,26 @@ def merge_state_dict(self, new_client_sd[key] = torch.cat(value_list, axis=0) else: new_client_sd[key] = self.merge_query_key_value(value_list, ckpt_ver) - elif "mlp.dense_h_to_4h.weight" in key or "word_embeddings.weight" in key or "mlp.dense_h_to_4h.bias" in key: - if quantize and "mlp.dense_h_to_4h.weight" in key: - value_list = quantizer.Quantize(value_list, quantize_bits, groups, key=key) + elif "word_embeddings.weight" in key or "mlp.dense_h_to_4h.bias" in key or "lm_head.weight" in key: new_client_sd[key] = torch.cat(value_list, axis=0) + elif "mlp.dense_h_to_4h.weight" in key: + if quantize: + value_list = quantizer.Quantize(value_list, quantize_bits, groups, key=key) + # HACK: + # Following code checks if h_to_4h is swiglu. This is required in order to merge correctly. + # The correct way is to add metadata to state_dict that provides info on how to merge/split each tensor. 
+ size_h_to_4h = sd_list[0]["mlp.dense_h_to_4h.weight"].numel() + size_4h_to_h = sd_list[0]["mlp.dense_4h_to_h.weight"].numel() + if size_h_to_4h == size_4h_to_h: + new_client_sd[key] = torch.cat(value_list, axis=0) + elif size_h_to_4h == 2 * size_4h_to_h: + chunked_slices = [torch.chunk(v, 2, dim=0) for v in value_list] + merged_chunks_0 = torch.cat([s[0] for s in chunked_slices], dim=0) + merged_chunks_1 = torch.cat([s[1] for s in chunked_slices], dim=0) + new_client_sd[key] = torch.cat([merged_chunks_0, merged_chunks_1], dim=0) + else: + assert False, f"Unsupported slices size of mlp.dense_h_to_4h.weight={size_h_to_4h} " \ + f"mlp.dense_4h_to_h.weight={size_4h_to_h}" else: new_client_sd[key] = value_list[0] if quantize: @@ -383,12 +399,27 @@ def split_state_dict(self, q_vals = quantizer.Quantize([value], quantize_bits, groups, key) value = q_vals[0] new_client_sd[key] = self.split_query_key_value(value, num_to_split, ckpt_offset, ckpt_ver) - elif "mlp.dense_h_to_4h.weight" in key or "word_embeddings.weight" in key or "mlp.dense_h_to_4h.bias" in key or "final_linear.weight" in key: + elif "word_embeddings.weight" in key or "mlp.dense_h_to_4h.bias" in key or "final_linear.weight" in key \ + or "lm_head.weight" in key: assert value.shape[0] % num_to_split == 0 split_size = value.shape[0] // num_to_split - if quantize and "mlp.dense_h_to_4h.weight" in key: + new_client_sd[key] = torch.split(value, split_size, dim=0)[ckpt_offset] + elif "mlp.dense_h_to_4h.weight" in key: + assert value.shape[0] % num_to_split == 0 + split_size = value.shape[0] // num_to_split + if quantize: q_vals = quantizer.Quantize([value], quantize_bits, groups, key) value = q_vals[0] + # HACK: + # Following code checks if h_to_4h is swiglu. + # The correct way to check is to add metadata to state_dict that provides info on + # how to merge/split each tensor. + # Currently, swiglu split is NOT supported as it requires handling of all chunks. + size_h_to_4h = value.numel() + size_4h_to_h = client_sd["mlp.dense_4h_to_h.weight"].numel() + assert size_h_to_4h == size_4h_to_h, \ + f"Split not supported dense_h_to_4h.weight size={size_h_to_4h} " \ + f"and dense_4h_to_h.weight size={size_4h_to_h}" new_client_sd[key] = torch.split(value, split_size, dim=0)[ckpt_offset] else: new_client_sd[key] = value diff --git a/deepspeed/runtime/utils.py b/deepspeed/runtime/utils.py index d1ebe4b2f83d..a3855d065ec8 100755 --- a/deepspeed/runtime/utils.py +++ b/deepspeed/runtime/utils.py @@ -25,6 +25,8 @@ from torch import inf from deepspeed.utils import groups, logger +from deepspeed.utils.bwc import (bwc_tensor_model_parallel_rank, bwc_pipeline_parallel_world_size, + bwc_pipeline_parallel_group) from deepspeed.runtime.constants import PIPE_REPLICATED from numpy import prod from deepspeed.accelerator import get_accelerator @@ -117,44 +119,6 @@ def is_model_parallel_parameter(p) -> bool: return False -def bwc_tensor_model_parallel_rank(mpu=None): - """Backwards-compatible way of querying the tensor model parallel rank from - an ``mpu`` object. - - *Tensor* model parallelism means that tensors are physically split across - processes. This contrasts with *pipeline* model parallelism, in which the - layers are partitioned but tensors left intact. - - The API for tensor model parallelism has changed across versions and this - helper provides a best-effort implementation across versions of ``mpu`` - objects. The preferred mechanism is - ``mpu.get_tensor_model_parallel_rank()``. 
- - This should "just work" with both Megatron-LM and DeepSpeed's pipeline - parallelism. - - Args: - mpu (model parallel unit, optional): The tensor model parallel rank. - If ``mpu=None``, returns 0. Defaults to ``None``. - - Returns: - int: the rank - """ - if mpu is None: - # No model parallelism in easy :) - return 0 - - if hasattr(mpu, 'get_tensor_model_parallel_rank'): - # New Megatron and DeepSpeed convention (post pipeline-parallelism release) - return mpu.get_tensor_model_parallel_rank() - elif hasattr(mpu, 'get_slice_parallel_rank'): - # Some DeepSpeed + pipeline parallelism versions - return mpu.get_slice_parallel_rank() - else: - # Deprecated Megatron and DeepSpeed convention - return mpu.get_model_parallel_rank() - - def copy_to_device(item, device, criterion_func): """ Return a copy of tensor on specified device. @@ -205,6 +169,17 @@ def move_to_device(item, device, criterion_func): return item +def get_norm_with_moe_layers_fast(all_groups_norm, group): + # This implementation standardizes the grad_norm across ranks. A more precise implementation can be found in 'get_norm_with_moe_layers'. + # Need to allreduce (avg) the norms across different ranks because moe params will not be synced during allreduce + scaled_norm = all_groups_norm * 1.0 / float(dist.get_world_size(group=group)) + scaled_norm_tensor = torch.tensor(scaled_norm, device=get_accelerator().current_device(), dtype=torch.float) + dist.all_reduce(scaled_norm_tensor, group=group) + all_groups_norm = scaled_norm_tensor.item() + #print(f"old = {all_groups_norm_old} and new = {all_groups_norm} at rank: {deepspeed.comm.get_rank()}") + return all_groups_norm + + class CheckOverflow(object): '''Checks for overflow in gradient across parallel process''' @@ -369,8 +344,7 @@ def clip_grad_norm_(parameters, max_norm, norm_type=2, mpu=None): for p in parameters: all_norms.append(p.grad.data.abs().max().float()) total_norm = torch.stack(all_norms).max() - origin_device = total_norm.device.type - total_norm = total_norm.to(get_accelerator().device_name()) + total_norm = total_norm.to(get_accelerator().current_device_name()) # Take max across all GPUs. if mpu is not None: dist.all_reduce(total_norm, op=dist.ReduceOp.MAX, group=mpu.get_model_parallel_group()) @@ -387,9 +361,8 @@ def clip_grad_norm_(parameters, max_norm, norm_type=2, mpu=None): if len(all_norms) > 0: total_norm = torch.stack(all_norms).square().sum().float() else: - total_norm = torch.FloatTensor([0.0]).to(parameters[0].device) - origin_device = total_norm.device.type - total_norm = total_norm.to(get_accelerator().device_name()) + total_norm = get_accelerator().FloatTensor([0.0]) + total_norm = total_norm.to(get_accelerator().current_device_name()) # Sum across all model parallel GPUs. 
if mpu is not None: dist.all_reduce(total_norm, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group()) @@ -402,11 +375,11 @@ def clip_grad_norm_(parameters, max_norm, norm_type=2, mpu=None): dist.all_reduce(scaled_norm_tensor, group=pg) total_norm = scaled_norm_tensor - total_norm = total_norm.to(origin_device) + total_norm = total_norm.to(parameters[0].device) - max_norm = torch.tensor([float(max_norm)], device=parameters[0].device) + max_norm = torch.tensor([float(max_norm)], device=total_norm.device) clip_coef = max_norm / (total_norm + 1e-6) - tmp_tensor = torch.tensor([1.0], device=parameters[0].device) + tmp_tensor = torch.tensor([1.0], device=clip_coef.device) clip_coef = torch.min(tmp_tensor, clip_coef) for p in parameters: p.grad.data.mul_(clip_coef) @@ -753,6 +726,8 @@ def memory_status(msg, print_rank=-1, reset_max=False): new_alloced = get_accelerator().memory_allocated() new_cached = get_accelerator().memory_cached() + new_alloced = torch_memory_allocated() # noqa: F821 + delta_alloced = new_alloced - mem_alloced delta_cached = new_cached - mem_cached @@ -861,7 +836,7 @@ def clip_gradients(parameters, max_norm=1.0, global_grad_norm=None, mpu=None, ep return global_grad_norm -def get_global_norm_of_tensors(input_tensors, norm_type=2, mpu=None, use_graph=False): +def get_global_norm_of_tensors(input_tensors, norm_type=2, mpu=None, use_graph=False, moe_ep_group=None): """Get norm of an iterable of tensors. This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and @@ -879,37 +854,64 @@ def get_global_norm_of_tensors(input_tensors, norm_type=2, mpu=None, use_graph=F assert all([torch.is_tensor(t) for t in input_tensors]), f'expected list of only tensors' norm_type = float(norm_type) + all_norms = [] if norm_type == inf: - total_norm = max(t.data.abs().max() for t in input_tensors) - total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) + for t in input_tensors: + all_norms.append(t.data.abs().max().float()) + total_norm = torch.stack(all_norms).max() + device_total_norm = total_norm.to(get_accelerator().current_device_name()) + # Max across model parallel if mpu is not None: - dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=mpu.get_model_parallel_group()) - total_norm = total_norm_cuda[0].item() + # For MoE grads, max over model parallel only if MoE-TP is enabled + if moe_ep_group is None or groups._get_expert_model_parallel_world_size() > 1: + dist.all_reduce(device_total_norm, op=dist.ReduceOp.MAX, group=mpu.get_model_parallel_group()) + # If MoE grads and MoE-TP disabled, max over pipeline parallel + elif bwc_pipeline_parallel_world_size(mpu) > 1: + dist.all_reduce(device_total_norm, op=dist.ReduceOp.MAX, group=bwc_pipeline_parallel_group(mpu)) + + # MoE grads: max across expert parallel group + if moe_ep_group is not None: + dist.all_reduce(device_total_norm, op=dist.ReduceOp.MAX, group=moe_ep_group) + total_norm = device_total_norm.to(input_tensors[0].device) else: - if use_graph: - if 'norm_tensors_compute_buffer' not in graph_cache: - graph_cache['norm_tensors_compute_buffer'] = [t.data.float().norm(norm_type) for t in input_tensors] - compute_buffer = graph_cache['norm_tensors_compute_buffer'] - def _norm_tensors(tensor_list, _compute_buffer, _norm_type): - for i, t in enumerate(tensor_list): - _compute_buffer[i].data.copy_(t.data.float().norm(_norm_type)**_norm_type) - if i != 0: - _compute_buffer[0].data.add_(_compute_buffer[i].data) + if 'norm_tensors_compute_buffer' not in graph_cache or len( + 
graph_cache['norm_tensors_compute_buffer']) != len(input_tensors): + graph_cache['norm_tensors_compute_buffer'] = [ + torch.empty([], dtype=torch.float, device=get_accelerator().current_device_name()) + for t in input_tensors + ] + compute_buffer = graph_cache['norm_tensors_compute_buffer'] - graph_process(False, _norm_tensors, input_tensors, compute_buffer, norm_type) + def _norm_tensors(tensor_list, _compute_buffer, _norm_type): + for i, t in enumerate(tensor_list): + _compute_buffer[i].data.copy_(t.data.float().norm(_norm_type)**_norm_type) + if i != 0: + _compute_buffer[0].data.add_(_compute_buffer[i].data) - total_norm = compute_buffer[0] + if use_graph: + graph_process(False, _norm_tensors, input_tensors, compute_buffer, norm_type) else: - total_norm = sum([t.data.float().norm(norm_type).item()**norm_type for t in input_tensors]) + _norm_tensors(input_tensors, compute_buffer, norm_type) + + device_total_norm = compute_buffer[0].float().detach() - total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]).detach() + # Sum across model parallel if mpu is not None: - dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group()) - total_norm = total_norm_cuda[0].item()**(1. / norm_type) + # For MoE grads, sum over model parallel only if MoE-TP is enabled + if moe_ep_group is None or groups._get_expert_model_parallel_world_size() > 1: + dist.all_reduce(device_total_norm, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group()) + # If MoE grads and MoE-TP disabled, sum over pipeline parallel + elif bwc_pipeline_parallel_world_size(mpu) > 1: + dist.all_reduce(device_total_norm, op=dist.ReduceOp.SUM, group=bwc_pipeline_parallel_group(mpu)) - if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm: - total_norm = -1 + # MoE grads: sum across expert parallel group + if moe_ep_group is not None: + dist.all_reduce(device_total_norm, op=dist.ReduceOp.SUM, group=moe_ep_group) + total_norm = device_total_norm.to(input_tensors[0].device).pow(1. 
/ norm_type) + + inf_or_nan = total_norm.isinf().logical_or(total_norm.isnan()) + total_norm.masked_fill_(inf_or_nan, -1) return total_norm @@ -964,6 +966,8 @@ def align_dense_tensors(tensor_list, alignment): def all_gather_into_tensor_dp_groups(groups_flat, partitioned_param_groups, dp_process_group): for group_id, (group_flat, partitioned_params) in enumerate(zip(groups_flat, partitioned_param_groups)): + if group_flat is None: + continue partition_id = dist.get_rank(group=dp_process_group[group_id]) dp_world_size = dist.get_world_size(group=dp_process_group[group_id]) if dp_world_size == 1: @@ -979,6 +983,8 @@ def all_gather_dp_groups(groups_flat, partitioned_param_groups, dp_process_group return all_gather_into_tensor_dp_groups(groups_flat, partitioned_param_groups, dp_process_group) for group_id, partitioned_params in enumerate(partitioned_param_groups): + if partitioned_params is None: + continue # Sequential AllGather Best of both worlds partition_id = dist.get_rank(group=dp_process_group[group_id]) dp_world_size = dist.get_world_size(group=dp_process_group[group_id]) @@ -1048,3 +1054,45 @@ def required_torch_version(min_version=None, max_version=None): return False return True + + +def get_norm_with_moe_layers(non_expert_norm, mpu, expert_tensors, norm_type=2): + """ Compute the global norm with MoE experts + + Inputs: + non_expert_norm (float) : the calculated norm of the non-expert params + expert_tensors (Dict[ep_name, List[Tensor]): Dictionary of expert group name to list of grad tensors + norm_type (int): the norm to use + + Returns: + if norm is (-/+) inf, returns -1 + otherwise the global norm (float) + """ + + def to_tensor(v): + return get_accelerator().FloatTensor(float(v)).detach() + + group_norms = [non_expert_norm] + for exp_name, tensors in expert_tensors.items(): + group_norm = get_global_norm_of_tensors(input_tensors=tensors, + mpu=mpu, + norm_type=norm_type, + use_graph=False, + moe_ep_group=groups._get_expert_parallel_group(exp_name)) + group_norms.append(group_norm) + + # check if all norms are valid + group_norms = torch.stack([to_tensor(norm) for norm in group_norms]) + if group_norms.eq(-1).any(): + return -1 + + # combine norms + if norm_type == inf: + total_norm = group_norms.max().item() + else: + total_norm = group_norms.pow(norm_type).sum() + total_norm = total_norm.item()**(1. / norm_type) + if total_norm == float('inf') or total_norm == -float('inf'): + total_norm = -1 + + return total_norm diff --git a/deepspeed/runtime/zero/config.py b/deepspeed/runtime/zero/config.py index 76583c129cb9..8cf9f67cf295 100644 --- a/deepspeed/runtime/zero/config.py +++ b/deepspeed/runtime/zero/config.py @@ -20,6 +20,7 @@ "stage": [0|1|2], "stage3_max_live_parameters" : 1000000000, "stage3_max_reuse_distance" : 1000000000, + "stage3_use_all_reduce_for_fetch_params": [true|false], "allgather_partitions": [true|false], "use_multi_rank_bucket_allreduce": [true|false], "allgather_bucket_size": 500000000, @@ -197,7 +198,10 @@ class DeepSpeedZeroConfig(DeepSpeedConfigModel): ZeRO3-Offload, ZeRO-Infinity, and ZeRO-Inference. """ - param_persistence_threshold: int = Field(pp_int(1e5), ge=0, alias="stage3_param_persistence_threshold") + #WA for SW-148986. Set param_persistence_threshold to 0 for zero inf + param_persistence_threshold: int = Field( + None, alias="stage3_param_persistence_threshold" + ) # None for dynamic default value (see validator `param_persistence_threshold_valid` below) """ Do not partition parameters smaller than this threshold. 
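Note: a worked instance of the norm combination performed by get_norm_with_moe_layers for the default norm_type=2; the numbers are illustrative, the formula comes from the code above:

# Group norms are combined as (sum_g norm_g**p)**(1/p); a -1 in any group
# (signalling inf/NaN) short-circuits the result to -1 instead.
non_expert_norm, expert_norm, p = 3.0, 4.0, 2
total_norm = (non_expert_norm**p + expert_norm**p) ** (1.0 / p)
assert total_norm == 5.0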
Smaller values use less memory, but can greatly increase communication (especially @@ -239,6 +243,12 @@ class DeepSpeedZeroConfig(DeepSpeedConfigModel): new_param="gather_16bit_weights_on_model_save") """ Deprecated, please use ``gather_16bit_weights_on_model_save`` """ + use_all_reduce_for_fetch_params: bool = Field(False, alias="stage3_use_all_reduce_for_fetch_params") + """ + Use all_reduce op when fetching module parameters at stage3. This allows to significantly improve + performance by reducing the overhead of concatenation and slicing on the host. + """ + ignore_unused_parameters: bool = True """ Unused parameters in modules may be unexpected in static networks, but @@ -315,3 +325,14 @@ def offload_ratio_check(cls, values): if offload_config and offload_config.ratio < 1.0: assert values.get("stage") == ZeroStageEnum.weights, "Partial offloading only supported for ZeRO Stage 3." return values + + @validator("param_persistence_threshold") + def param_persistence_threshold_valid(cls, field_value, values): + if field_value is None: + assert ( + "offload_param" + in values), "DeepSpeedZeroConfig: 'offload_param' must be defined before 'param_persistence_threshold'" + field_value = pp_int(1e5) + if values["offload_param"] is not None and values["offload_param"].device != OffloadDeviceEnum.none: + field_value = 0 + return field_value diff --git a/deepspeed/runtime/zero/parameter_offload.py b/deepspeed/runtime/zero/parameter_offload.py index e9e79c2647fb..352d15f2d09b 100644 --- a/deepspeed/runtime/zero/parameter_offload.py +++ b/deepspeed/runtime/zero/parameter_offload.py @@ -438,10 +438,11 @@ def backward(ctx, *args): # post backward hook self.backward_hooks.append(module.register_forward_pre_hook(_post_backward_module_hook)) - @torch.no_grad() def pre_sub_module_forward_function(self, sub_module): see_memory_usage(f"Before sub module function {sub_module.__class__.__name__}", force=False) - + prev_grad_state = torch.is_grad_enabled( + ) # we don't want to enable grad for sub modules fetching, yet the subfunction need to know if grad is enabled + torch.set_grad_enabled(False) global FWD_MODULE_STACK FWD_MODULE_STACK.append(sub_module) @@ -449,8 +450,8 @@ def pre_sub_module_forward_function(self, sub_module): param_coordinator.trace_prologue(sub_module) if param_coordinator.is_record_trace(): param_coordinator.record_module(sub_module) - param_coordinator.fetch_sub_module(sub_module, forward=True) - + param_coordinator.fetch_sub_module(sub_module, forward=prev_grad_state) + torch.set_grad_enabled(prev_grad_state) see_memory_usage(f"Before sub module function {sub_module.__class__.__name__} after fetch", force=False) @torch.no_grad() @@ -459,7 +460,7 @@ def post_sub_module_forward_function(self, sub_module): force=False) param_coordinator = self.get_param_coordinator(training=sub_module.training) - param_coordinator.release_sub_module(sub_module) + param_coordinator.release_sub_module(sub_module, backward=False) see_memory_usage(f"After sub module function {sub_module.__class__.__name__} {sub_module.id} after release", force=False) @@ -480,7 +481,7 @@ def post_sub_module_backward_function(self, sub_module): f"After sub module backward function {sub_module.__class__.__name__} {sub_module.id} before release", force=False) - self.get_param_coordinator(training=True).release_sub_module(sub_module) + self.get_param_coordinator(training=True).release_sub_module(sub_module, backward=True) see_memory_usage( f"After sub module backward function {sub_module.__class__.__name__} {sub_module.id} after 
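Note: a hypothetical config sketch showing how the two ZeRO-3 knobs touched in this hunk interact; the key names are taken from the diff, the values are illustrative:

ds_config = {
    "zero_optimization": {
        "stage": 3,
        # New option: fetch module params with all_reduce instead of all_gather.
        "stage3_use_all_reduce_for_fetch_params": True,
        # stage3_param_persistence_threshold is now dynamic: left unset it
        # resolves to 1e5, but the validator drops it to 0 when offload_param
        # targets a device other than "none", as below.
        "offload_param": {"device": "cpu"},
    }
}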
release", diff --git a/deepspeed/runtime/zero/partition_parameters.py b/deepspeed/runtime/zero/partition_parameters.py index 142259c1b7df..2f7a496e7472 100755 --- a/deepspeed/runtime/zero/partition_parameters.py +++ b/deepspeed/runtime/zero/partition_parameters.py @@ -23,7 +23,7 @@ from deepspeed.utils import groups import deepspeed -from ..utils import see_memory_usage +from ..utils import get_only_unique_item, see_memory_usage from deepspeed.runtime.zero.config import DeepSpeedZeroConfig from deepspeed.runtime.zero.utils import assert_ints_same_as_other_ranks, is_zero_param from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum @@ -237,7 +237,7 @@ def wrapped_fn(*args, **kwargs) -> Tensor: kwargs['device'] = torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"])) tensor: Tensor = fn(*args, **kwargs) if tensor.is_floating_point(): - tensor.data = tensor.data.to(target_fp_dtype) + tensor = tensor.to(target_fp_dtype) #TODO(SW-180933) remove when SW-174023 is resolved return tensor @@ -646,6 +646,7 @@ def __init__( partitions: List[Tensor], world_size: int, use_secondary_tensor=False, + forward=False, quantization=None, ) -> None: self.allgather_handle = allgather_handle @@ -653,6 +654,7 @@ def __init__( self.partitions = partitions self.world_size = world_size self.use_secondary_tensor = use_secondary_tensor + self.forward = forward self.complete = False self.quantization = quantization @@ -683,7 +685,7 @@ def wait(self) -> None: assert param.ds_status == ZeroParamStatus.INFLIGHT, f"expected param {param.ds_summary()} to be inflight" partitions: List[Tensor] = [] ds_tensor_numel = param.ds_tensor.ds_numel - if self.use_secondary_tensor: + if self.use_secondary_tensor and not self.forward: ds_tensor_numel *= param.ds_secondary_tensor_num_of_groups for rank in range(self.world_size): param_start = rank * ds_tensor_numel @@ -713,6 +715,31 @@ def wait(self) -> None: handle.wait() +class AllReduceCoalescedHandle: + + def __init__(self, handle, params: List[Parameter]) -> None: + self.handle = handle + self.params = params + self.complete = False + + for param in self.params: + if param.ds_status != ZeroParamStatus.INFLIGHT: + raise RuntimeError(f"expected param {param.ds_summary()} to not be available") + + @instrument_w_nvtx + def wait(self) -> None: + if self.complete: + return + + instrument_w_nvtx(self.handle.wait)() + + for param in self.params: + assert param.ds_status == ZeroParamStatus.INFLIGHT, f"expected param {param.ds_summary()} to be inflight" + param.ds_status = ZeroParamStatus.AVAILABLE + + self.complete = True + + class QuantizationInfo: # a placeholder object to store all quant related vars used in handles def __init__(self) -> None: @@ -947,7 +974,7 @@ def __init__( self.num_ranks_in_param_group = groups._get_zero_param_intra_parallel_group_world_size() self.num_param_groups = int(self.dp_world_size / self.num_ranks_in_param_group) self.rank_in_group = groups._get_zero_param_intra_parallel_rank_in_mygroup() - print_rank_0(f"hpZeRO group size: {self.num_ranks_in_param_group}", force=True) + print_rank_0(f"hpZeRO group size? 
{self.num_ranks_in_param_group}", force=True) logger.debug( "hpZeRO partition parameter my rank in world {} my rank in group {} ranks in my param partition group: {} " @@ -1000,6 +1027,11 @@ def __init__( if not self.use_all_gather_into_tensor: logger.info(f"all_gather_into_tensor API is not available in torch {torch.__version__}") + self.use_all_reduce_for_fetch_params = get_config_default(DeepSpeedZeroConfig, + "use_all_reduce_for_fetch_params") + if _ds_config is not None: + self.use_all_reduce_for_fetch_params = _ds_config.zero_config.use_all_reduce_for_fetch_params + def _update_persist_config(self, ds_config): Init.apply_param_persistence = True Init.param_persistence_threshold = ds_config.zero_config.param_persistence_threshold @@ -1117,10 +1149,10 @@ def all_gather(param_list=None, async_op=False, hierarchy=0): param_list = [cls] return self._all_gather(param_list, async_op=async_op, hierarchy=hierarchy) - def _all_gather_dtype(dtype, params, world_size, rank_in_group, ds_process_group): + def _all_gather_dtype(dtype, params, forward, world_size, rank_in_group, ds_process_group): partition_sz = sum(p.ds_tensor.ds_numel for p in params) - use_secondary_tensor = params[0].ds_secondary_tensor is not None + use_secondary_tensor = params[0].ds_secondary_tensor is not None and not forward if use_secondary_tensor: partition_sz = sum(p.ds_tensor.ds_numel * p.ds_secondary_tensor_num_of_groups for p in params) @@ -1150,10 +1182,12 @@ def _all_gather_dtype(dtype, params, world_size, rank_in_group, ds_process_group partitions=partitions, world_size=world_size, use_secondary_tensor=use_secondary_tensor, + forward=forward, ) @instrument_w_nvtx def all_gather_coalesced(params: Iterable[Parameter], + forward: bool = True, safe_mode: bool = False, quantize: bool = False) -> AllGatherCoalescedHandle: @@ -1172,8 +1206,8 @@ def all_gather_coalesced(params: Iterable[Parameter], ds_process_group = self.ds_process_group rank_in_group = self.rank world_size = self.dp_world_size - use_secondary_tensor = params[0].ds_secondary_tensor is not None - if self.zero_param_process_group and use_secondary_tensor: + use_secondary_tensor = params[0].ds_secondary_tensor is not None and not forward + if self.zero_param_process_group and not forward: ds_process_group = self.zero_param_process_group #intragroup rank_in_group = self.rank_in_group world_size = self.num_ranks_in_param_group @@ -1247,77 +1281,102 @@ def all_gather_coalesced(params: Iterable[Parameter], return AllGatherHandle(handle, param, quantization=quant_info) else: - if not quantize: - dtype_params = defaultdict(list) - for p in params: - dtype_params[p.ds_tensor.dtype].append(p) - handles = [] - for dtype, params in dtype_params.items(): - handles.append(_all_gather_dtype(dtype, params, world_size, rank_in_group, ds_process_group)) - - return MultipleAllGatherHandles(handles) + # TODO (SW-164792): Handle use case when use_secondary_tensor is True + if self.use_all_reduce_for_fetch_params and not quantize and not use_secondary_tensor: + # Use all_reduce instead of all_gather to fetch the module params + flat_buffer_size = sum(p.ds_numel_aligned for p in params) + flat_tensor = torch.zeros(flat_buffer_size, + dtype=get_only_unique_item(p.ds_tensor.dtype for p in params), + device=get_accelerator().current_device_name(), + requires_grad=False) + start_param = 0 + for param in params: + param.data = flat_tensor.narrow(0, start_param, param.ds_numel).view(param.ds_shape) + start = start_param + param.ds_tensor.ds_numel * self.get_partition_rank() + 
flat_tensor.narrow(0, start, param.ds_tensor.ds_numel).copy_(param.ds_tensor) - else: - partition_sz = sum(p.ds_tensor.ds_numel for p in params) + start_param += param.ds_numel - if use_secondary_tensor: - partition_sz = sum(p.ds_tensor.ds_numel * p.ds_secondary_tensor_num_of_groups for p in params) + handle = dist.all_reduce(flat_tensor, group=ds_process_group, async_op=True) - flat_tensor = torch.empty(partition_sz * world_size, - dtype=torch.int8, - device=get_accelerator().current_device_name(), - requires_grad=False) + return AllReduceCoalescedHandle(handle=handle, params=params) + else: + if not quantize: + dtype_params = defaultdict(list) + for p in params: + dtype_params[p.ds_tensor.dtype].append(p) + handles = [] + for dtype, params in dtype_params.items(): + handles.append( + _all_gather_dtype(dtype, params, forward, world_size, rank_in_group, ds_process_group)) + return MultipleAllGatherHandles(handles) - if use_secondary_tensor: - if hasattr(params[0].ds_secondary_tensor, "ds_quant_scale"): - quantized_param = instrument_w_nvtx(torch.cat)([ - p.ds_secondary_tensor.data.to(get_accelerator().current_device_name()) for p in params - ]) - scales = instrument_w_nvtx(torch.cat)([ - p.ds_secondary_tensor.ds_quant_scale.to(get_accelerator().current_device_name()) - for p in params - ]) - else: - quantized_param, scales = self.quantizer_module.quantize( - instrument_w_nvtx(torch.cat)([ - p.ds_secondary_tensor.to(get_accelerator().current_device_name()) for p in params - ])) else: - if hasattr(params[0].ds_tensor, "ds_quant_scale"): - quantized_param = instrument_w_nvtx(torch.cat)( - [p.ds_tensor.data.to(get_accelerator().current_device_name()) for p in params]) - scales = instrument_w_nvtx(torch.cat)([ - p.ds_tensor.ds_quant_scale.to(get_accelerator().current_device_name()) for p in params - ]) + partition_sz = sum(p.ds_tensor.ds_numel for p in params) + + if use_secondary_tensor: + partition_sz = sum(p.ds_tensor.ds_numel * p.ds_secondary_tensor_num_of_groups + for p in params) + + flat_tensor = torch.empty(partition_sz * world_size, + dtype=torch.int8, + device=get_accelerator().current_device_name(), + requires_grad=False) + + if use_secondary_tensor: + if hasattr(params[0].ds_secondary_tensor, "ds_quant_scale"): + quantized_param = instrument_w_nvtx(torch.cat)([ + p.ds_secondary_tensor.data.to(get_accelerator().current_device_name()) + for p in params + ]) + scales = instrument_w_nvtx(torch.cat)([ + p.ds_secondary_tensor.ds_quant_scale.to(get_accelerator().current_device_name()) + for p in params + ]) + else: + quantized_param, scales = self.quantizer_module.quantize( + instrument_w_nvtx(torch.cat)([ + p.ds_secondary_tensor.to(get_accelerator().current_device_name()) + for p in params + ])) else: - quantized_param, scales = self.quantizer_module.quantize( - instrument_w_nvtx(torch.cat)( - [p.ds_tensor.to(get_accelerator().current_device_name()) for p in params])) - quant_scale_buffer = torch.empty( - scales.numel() * world_size, - dtype=torch.float32, - device=get_accelerator().current_device_name(), - requires_grad=False, - ) - handle = _dist_allgather_fn(quantized_param, flat_tensor, ds_process_group) - quant_handle = _dist_allgather_fn(scales, quant_scale_buffer, ds_process_group) - quant_info = QuantizationInfo() - quant_info.quantized_param = flat_tensor - quant_info.backend = self.quantizer_module - quant_info.quant_handle = quant_handle - quant_info.scale_buffer = quant_scale_buffer - quant_info.partition_sz = partition_sz - quant_info.world_size = world_size - return 
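Note: the all_reduce fetch path above works because the per-rank shards are disjoint, so summing zero-initialized flat buffers that each carry only the local shard reconstructs the full flat parameter. A self-contained, single-process sketch of that equivalence (variable names are illustrative):

import torch

world_size, shard_numel = 4, 2
full = torch.arange(world_size * shard_numel, dtype=torch.float)
shards = list(full.chunk(world_size))                  # each rank's param.ds_tensor

buffers = []
for rank, shard in enumerate(shards):
    buf = torch.zeros_like(full)                       # flat_tensor on this rank
    buf.narrow(0, rank * shard_numel, shard_numel).copy_(shard)
    buffers.append(buf)

reduced = torch.stack(buffers).sum(dim=0)              # what all_reduce(SUM) yields
assert torch.equal(reduced, full)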
AllGatherCoalescedHandle( - allgather_handle=handle, - params=params, - partitions=None, - world_size=world_size, - use_secondary_tensor=use_secondary_tensor, - quantization=quant_info, - ) - - def partition(param_list=None, hierarchy=0, has_been_updated=False): + if hasattr(params[0].ds_tensor, "ds_quant_scale"): + quantized_param = instrument_w_nvtx(torch.cat)( + [p.ds_tensor.data.to(get_accelerator().current_device_name()) for p in params]) + scales = instrument_w_nvtx(torch.cat)([ + p.ds_tensor.ds_quant_scale.to(get_accelerator().current_device_name()) + for p in params + ]) + else: + quantized_param, scales = self.quantizer_module.quantize( + instrument_w_nvtx(torch.cat)( + [p.ds_tensor.to(get_accelerator().current_device_name()) for p in params])) + quant_scale_buffer = torch.empty( + scales.numel() * world_size, + dtype=torch.float32, + device=get_accelerator().current_device_name(), + requires_grad=False, + ) + handle = _dist_allgather_fn(quantized_param, flat_tensor, ds_process_group) + quant_handle = _dist_allgather_fn(scales, quant_scale_buffer, ds_process_group) + quant_info = QuantizationInfo() + quant_info.quantized_param = flat_tensor + quant_info.backend = self.quantizer_module + quant_info.quant_handle = quant_handle + quant_info.scale_buffer = quant_scale_buffer + quant_info.partition_sz = partition_sz + quant_info.world_size = world_size + return AllGatherCoalescedHandle( + allgather_handle=handle, + params=params, + partitions=None, + world_size=world_size, + use_secondary_tensor=use_secondary_tensor, + forward=forward, + quantization=quant_info, + ) + + def partition(param_list=None, backward=False, hierarchy=0, has_been_updated=False): cls = param print_rank_0(f"{'--'*hierarchy}----Partitioning param {debug_param2name_id_shape_device(cls)}", force=False) @@ -1471,7 +1530,7 @@ def _partition(self, param_list, force=False, has_been_updated=False): for param in param_list: print_rank_0(f"Before Partitioning Param {param.ds_id}", force=False) if self.zero_param_process_group is not None: - self._partition_param_sec(param) + self._partition_param_sec(param, has_been_updated=has_been_updated) self._partition_param(param, has_been_updated=has_been_updated) param.ds_status = ZeroParamStatus.NOT_AVAILABLE @@ -1551,6 +1610,7 @@ def _partition_param(self, param, buffer=None, has_been_updated=False): param.ds_tensor.ds_numel = partition_size param.ds_tensor.status = PartitionedParamStatus.AVAILABLE param.ds_tensor.final_location = final_location + param.ds_numel_aligned = tensor_size start = partition_size * self.get_partition_rank() end = start + partition_size @@ -1606,28 +1666,30 @@ def _partition_param_sec(self, param, buffer=None, has_been_updated=False): ##support for NVME secondary param offload #print_rank_0(f"SEC Param id {param.ds_id} status is {param.ds_status}", force=True) if param.ds_status is ZeroParamStatus.AVAILABLE: + if param.ds_secondary_tensor is not None and not has_been_updated: ##param already partitioned + + return #check padding tensor_size = self._aligned_size(param) partition_size = tensor_size // self.dp_world_size secondary_partition_size = int(tensor_size // self.num_ranks_in_param_group) - if param.ds_secondary_tensor is None: - final_location = None - secondary_partitioned_tensor = torch.empty(secondary_partition_size, - dtype=param.dtype, - device=self.remote_device) - - if self.pin_memory: - secondary_partitioned_tensor = secondary_partitioned_tensor.pin_memory() - # quantize the tensor if it's not trainable - if not param.requires_grad and 
self.quantized_nontrainable_weights: - secondary_partitioned_tensor, secondary_partitioned_tensor.ds_quant_scale = self.quantizer_module.quantize( - secondary_partitioned_tensor) - secondary_partitioned_tensor.requires_grad = False - param.ds_secondary_tensor = secondary_partitioned_tensor - param.ds_secondary_tensor.ds_numel = secondary_partition_size - param.ds_secondary_tensor.status = PartitionedParamStatus.AVAILABLE - param.ds_secondary_tensor.final_location = final_location + final_location = None + secondary_partitioned_tensor = torch.empty(secondary_partition_size, + dtype=param.dtype, + device=self.remote_device) + + if self.pin_memory: + secondary_partitioned_tensor = secondary_partitioned_tensor.pin_memory() + # quantize the tensor if it's not trainable + if not param.requires_grad and self.quantized_nontrainable_weights: + secondary_partitioned_tensor, secondary_partitioned_tensor.ds_quant_scale = self.quantizer_module.quantize( + secondary_partitioned_tensor) + secondary_partitioned_tensor.requires_grad = False + param.ds_secondary_tensor = secondary_partitioned_tensor + param.ds_secondary_tensor.ds_numel = secondary_partition_size + param.ds_secondary_tensor.status = PartitionedParamStatus.AVAILABLE + param.ds_secondary_tensor.final_location = final_location #use rank in group for secondary tensor secondary_start = secondary_partition_size * self.rank_in_group @@ -1679,7 +1741,8 @@ def _allgather_param(self, param, async_op=False, hierarchy=0): f'After allocate allgather param {debug_param2name_id_shape_status(param)} {aligned_param_size} {partition_size} ', force=False) - get_accelerator().synchronize() + if not get_accelerator().device_name() == "hpu": + get_accelerator().synchronize() print_rank_0( f"{'--'* hierarchy}----allgather param with {debug_param2name_id_shape_status(param)} partition size={partition_size}" @@ -1812,7 +1875,8 @@ def _allgather_params_coalesced(self, param_list, hierarchy=0, quantize=False): param.data = gathered_tensor.narrow(0, 0, param.ds_numel).view(param.ds_shape).data # guarantee the communication to be completed - get_accelerator().synchronize() + if not get_accelerator().device_name() == "hpu": + get_accelerator().synchronize() return None diff --git a/deepspeed/runtime/zero/partitioned_param_coordinator.py b/deepspeed/runtime/zero/partitioned_param_coordinator.py index 8fc962c4f2a7..7a18ed3a0019 100644 --- a/deepspeed/runtime/zero/partitioned_param_coordinator.py +++ b/deepspeed/runtime/zero/partitioned_param_coordinator.py @@ -125,7 +125,7 @@ def __init__( # mechanism which doesn't require any configuration by the user. self.__ongoing_fetch_events: Deque[get_accelerator().Event] = collections.deque() # TODO. 
make this configurable via JSON - self.__max_ongoing_fetch_events: int = 2 + self.__max_ongoing_fetch_events: int = 2 if get_accelerator().device_name() != "hpu" else -1 self.__profiler = PartitionedParameterProfiler(timers if ENABLE_PROFILER else None) """Tracing and Tracking @@ -303,7 +303,7 @@ def fetch_sub_module(self, current_submodule: Module, forward: bool) -> None: with get_accelerator().stream(self.__allgather_stream): while self.__ongoing_fetch_events and self.__ongoing_fetch_events[0].query(): self.__ongoing_fetch_events.popleft() - if len(self.__ongoing_fetch_events) > self.__max_ongoing_fetch_events: + if len(self.__ongoing_fetch_events) > self.__max_ongoing_fetch_events > -1: self.__ongoing_fetch_events.popleft().synchronize() self.__inflight_param_registry.pop(param).wait() @@ -392,7 +392,7 @@ def _is_currently_on_nvme(param): @instrument_w_nvtx @torch.no_grad() - def release_sub_module(self, submodule: Module) -> None: + def release_sub_module(self, submodule: Module, backward: bool) -> None: """release the parameters of a sub module, assuming they meet conditions to be released.""" params_to_release = (self.__params_to_release(submodule, self.__step_id) if self.is_complete_trace() else set( @@ -400,7 +400,7 @@ def release_sub_module(self, submodule: Module) -> None: for param in iter_params(submodule, recurse=z3_leaf_module(submodule)): param.ds_active_sub_modules.discard(submodule.id) if param.ds_id in params_to_release and not param.is_external_param: - self.__release_param(param) + self.__release_param(param, backward) @instrument_w_nvtx @torch.no_grad() @@ -413,7 +413,7 @@ def release_and_reset_all(self, module: Module) -> None: # TODO. make this throw if if there are still active submodules. currently # there's a hook execution issue param.ds_active_sub_modules.clear() - self.__release_param(param) + self.__release_param(param, backward=False) for param in iter_params(module, recurse=True): if param.ds_status != ZeroParamStatus.NOT_AVAILABLE: @@ -444,27 +444,19 @@ def __all_gather_params_(self, params: Set[Parameter], forward: bool, quantize: all_gather_numel += param.ds_numel if partitioned_params: + partitioned_params self.__n_available_params += all_gather_numel - # here we need to handle a special case where some of the parameters have a valid hpz secondary tensor (e.g. they are not trainable so their secondary tensor never expire) but others do not. 
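Note: the -1 sentinel introduced for HPU relies on Python's chained comparison, so the synchronizing pop is skipped entirely when the limit is -1; the same pattern is applied to max_param_reduce_events in stage3.py further below. A minimal check:

# len(events) > limit > -1  ==  (len(events) > limit) and (limit > -1)
limit, events = -1, [object()] * 10
assert not (len(events) > limit > -1)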
- partitioned_params_with_secondary_tensors = [ - p for p in partitioned_params if p.ds_secondary_tensor is not None - ] - partitioned_params_without_secondary_tensors = [ - p for p in partitioned_params if p.ds_secondary_tensor is None - ] - for param_group in [ - partitioned_params_with_secondary_tensors, partitioned_params_without_secondary_tensors - ]: - if not param_group: - continue - with get_accelerator().stream(self.__allgather_stream): - event_name = __class__.FORWARD_ALL_GATHER if forward else __class__.BACKWARD_ALL_GATHER - self.__profiler.start_event(event_name) - handle = param_group[0].all_gather_coalesced(param_group, quantize=quantize) - self.__profiler.stop_event(event_name, all_gather_numel) - for param in param_group: - assert param.ds_status == ZeroParamStatus.INFLIGHT, param.ds_summary() - self.__inflight_param_registry[param] = handle + with get_accelerator().stream(self.__allgather_stream): + event_name = __class__.FORWARD_ALL_GATHER if forward else __class__.BACKWARD_ALL_GATHER + self.__profiler.start_event(event_name) + handle = partitioned_params[0].all_gather_coalesced(partitioned_params, + forward=forward, + quantize=quantize) + self.__profiler.stop_event(event_name, all_gather_numel) + + for param in partitioned_params: + assert param.ds_status == ZeroParamStatus.INFLIGHT, param.ds_summary() + self.__inflight_param_registry[param] = handle # Release swap buffers for persisted params on nvme since they will never be partitioned or evicted from GPU swap_persisted_params = [ @@ -475,11 +467,11 @@ def __all_gather_params_(self, params: Set[Parameter], forward: bool, quantize: @compiler.disable @instrument_w_nvtx - def __release_param(self, param: Parameter) -> None: + def __release_param(self, param: Parameter, backward: bool) -> None: if param.ds_status == ZeroParamStatus.AVAILABLE and not param.ds_active_sub_modules: if logger.isEnabledFor(logging.DEBUG): debug_rank0(f"-release: {param.ds_summary()}") - param.partition() + param.partition(backward=backward) self.__n_available_params -= param.ds_numel @instrument_w_nvtx diff --git a/deepspeed/runtime/zero/stage3.py b/deepspeed/runtime/zero/stage3.py index 03813afa4ed1..0701cdda0a18 100644 --- a/deepspeed/runtime/zero/stage3.py +++ b/deepspeed/runtime/zero/stage3.py @@ -11,11 +11,11 @@ from deepspeed.utils import groups from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors -from deepspeed.runtime import ZeROOptimizer +from deepspeed.runtime.base_optimizer import ZeROOptimizer from deepspeed.utils import logger from deepspeed.runtime.fp16.loss_scaler import CreateLossScaler from deepspeed.runtime.comm.coalesced_collectives import reduce_scatter_coalesced, all_to_all_quant_reduce -from deepspeed.runtime.utils import inf, get_global_norm, is_model_parallel_parameter, get_only_unique_item +from deepspeed.runtime.utils import inf, is_model_parallel_parameter, get_only_unique_item from deepspeed.runtime.zero.partition_parameters import * from deepspeed.runtime.zero.config import ZeroStageEnum from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum @@ -215,14 +215,12 @@ def __init__( self.module = module self.elastic_checkpoint = elastic_checkpoint - self.inf_or_nan_tracker: Tensor = torch.zeros(1, - dtype=torch.bool, - device=get_accelerator().current_device_name(), - requires_grad=False) + self.device = get_accelerator().current_device_name() if not self.offload_optimizer else OffloadDeviceEnum.cpu + + self.inf_or_nan_tracker: Tensor = torch.zeros(1, dtype=torch.bool, device=self.device, 
requires_grad=False) self.deepspeed_adam_offload = (self.offload_optimizer and type(init_optimizer) == DeepSpeedCPUAdam) - self.device = get_accelerator().current_device_name() if not self.offload_optimizer else OffloadDeviceEnum.cpu ### streams used for overlapping computation with communication self.reduce_and_partition_stream = None if get_accelerator().is_synchronized_device() else get_accelerator( ).Stream() if overlap_comm else get_accelerator().default_stream() @@ -326,7 +324,7 @@ def __init__( self.param_reduce_events: Deque[get_accelerator().Event] = collections.deque() # TODO. make this configurable via JSON - self.max_param_reduce_events: int = 2 + self.max_param_reduce_events: int = 2 if get_accelerator().device_name() != "hpu" else -1 self.param_dict = {} @@ -468,16 +466,13 @@ def invalidate_secondary_tensor(self): param.ds_secondary_tensor = None def _setup_for_real_optimizer(self): - see_memory_usage("Before creating fp32 partitions", force=True) self._create_fp32_partitions() - see_memory_usage("After creating fp32 partitions", force=True) dist.barrier() # To support pipelined optimizer swapping self._create_next_swappable_fp32_groups() see_memory_usage("Before initializing optimizer states", force=True) - self.initialize_optimizer_states() see_memory_usage("After initializing optimizer states", force=True) dist.barrier() @@ -548,6 +543,7 @@ def defragment(tensors: List[Tensor]) -> Tensor: offset += tensor_numel gc.collect() + #TODO SW-107191: support empty_cache() in hpu get_accelerator().empty_cache() # copy tensors (now flattened and contiguous) back to GPU @@ -1259,7 +1255,7 @@ def __reduce_and_partition_ipg_grads(self, safe_mode: bool = False) -> None: while self.param_reduce_events and self.param_reduce_events[0].query(): self.param_reduce_events.popleft() - if len(self.param_reduce_events) > self.max_param_reduce_events: + if len(self.param_reduce_events) > self.max_param_reduce_events > -1: self.param_reduce_events.popleft().synchronize() with get_accelerator().stream(self.reduce_and_partition_stream): @@ -1412,7 +1408,7 @@ def complete_grad_norm_calculation_for_cpu_offload(self, params): err = torch.tensor(-1.0, device=self.device, dtype=torch.float) total_norm = inf_or_nan * err + inf_or_nan.logical_not() * total_norm - return total_norm + return total_norm.cpu() @instrument_w_nvtx def partition_grads(self, params_to_release: List[Parameter], grad_partitions: List[Tensor]) -> None: @@ -1988,7 +1984,7 @@ def _post_step(self, timer_names): if self.swap_optimizer: self.optimizer_swapper.log_timers() - self.invalidate_secondary_tensor() + # self.invalidate_secondary_tensor() # given that we want hpz in forward pass when no_grad is set, we need to keep the secondary tensor self.timers.log(timer_names) @@ -2027,7 +2023,7 @@ def step(self, closure=None): return norm_groups = self._get_norm_groups() - scaled_global_grad_norm = get_global_norm(norm_list=norm_groups) + scaled_global_grad_norm = torch.norm(torch.stack(norm_groups)) # Stash unscaled gradient norm self._global_grad_norm = scaled_global_grad_norm / self.loss_scale @@ -2147,7 +2143,8 @@ def has_overflow(self, partition_gradients=True): self.inf_or_nan_tracker += torch.isnan(self.grad_partitions_flat_buffer).any() self.inf_or_nan_tracker = self.inf_or_nan_tracker > 0 - overflow_gpu = self.inf_or_nan_tracker.clone().to(torch.uint8) + overflow_gpu = self.inf_or_nan_tracker.clone().to(get_accelerator().current_device_name()).to( + torch.uint8) self.inf_or_nan_tracker.zero_() if not 
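Note: in step() the stacked-norm expression appears numerically equivalent to the removed get_global_norm for the default 2-norm, while keeping the result on device as a tensor. A minimal check with illustrative values:

import torch

norm_groups = [torch.tensor(3.0), torch.tensor(4.0)]
assert torch.norm(torch.stack(norm_groups)).item() == 5.0   # sqrt(3**2 + 4**2)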
get_accelerator().resolves_data_dependency(): diff --git a/deepspeed/runtime/zero/stage_1_and_2.py b/deepspeed/runtime/zero/stage_1_and_2.py index 71a01b2391f8..778f66bb95ed 100755 --- a/deepspeed/runtime/zero/stage_1_and_2.py +++ b/deepspeed/runtime/zero/stage_1_and_2.py @@ -4,21 +4,20 @@ # DeepSpeed Team import torch -import os from deepspeed import comm as dist from packaging import version as pkg_version from collections import OrderedDict from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors -from deepspeed.runtime import ZeROOptimizer +from deepspeed.runtime.base_optimizer import ZeROOptimizer from deepspeed.runtime.fp16.loss_scaler import CreateLossScaler -from deepspeed.runtime.utils import (bwc_tensor_model_parallel_rank, empty_cache, see_memory_usage, inf, - is_model_parallel_parameter, align_dense_tensors, all_gather_dp_groups) - +from deepspeed.runtime.utils import (empty_cache, see_memory_usage, inf, is_model_parallel_parameter, + align_dense_tensors, all_gather_dp_groups) from deepspeed.runtime.zero.config import ZeroStageEnum from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum from deepspeed.ops.adam import DeepSpeedCPUAdam from deepspeed.utils import logger +from deepspeed.utils.bwc import bwc_tensor_model_parallel_rank from deepspeed.moe.utils import is_moe_param from deepspeed.git_version_info import version @@ -1360,7 +1359,7 @@ def reduce_ipg_grads(self): self.average_tensor(extra_large_grad_reduc.view(-1)) self.extra_large_param_to_reduce = None else: - self.average_tensor(self.ipg_buffer[self.ipg_index]) + self.average_tensor(self.ipg_buffer[self.ipg_index].narrow(0, 0, self.elements_in_ipg_bucket)) else: self.buffered_reduce_fallback(None, self.grads_in_ipg_bucket, @@ -2287,32 +2286,13 @@ def load_state_dict(self, self._load_legacy_checkpoint(state_dict_list, load_optimizer_states, load_from_fp32_weights) def _load_universal_checkpoint(self, checkpoint_folder, load_optimizer_states, load_from_fp32_weights): - self._load_hp_checkpoint_state(checkpoint_folder) + self.load_hp_checkpoint_state_from_checkpoint_dir("bit16_groups", checkpoint_folder) @property def param_groups(self): """Forward the wrapped optimizer's parameters.""" return self.optimizer.param_groups - def _load_hp_checkpoint_state(self, checkpoint_dir): - checkpoint_dir = os.path.join(checkpoint_dir, "zero") - optim_state_path = os.path.join(checkpoint_dir, "optimizer_state.pt") - assert os.path.isfile( - optim_state_path), f'{optim_state_path} containing optimizer global state is missing! Cannot proceed.' 
- optim_sd = torch.load(optim_state_path) - self._load_global_state(optim_sd) - - tp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu) - tp_world_size = self.mpu.get_slice_parallel_world_size() if hasattr(self.mpu, "get_slice_parallel_world_size") \ - else self.mpu.get_tensor_model_parallel_world_size() - - for i, _ in enumerate(self.optimizer.param_groups): - for lp in self.bit16_groups[i]: - if lp._hp_mapping is not None: - #print(f"Loading {self.param_names[lp]} {tp_rank=} {tp_world_size=}") - lp.load_hp_checkpoint_state(os.path.join(checkpoint_dir, self.param_names[lp]), tp_rank, - tp_world_size) - def _load_global_state(self, sd): self.loss_scaler = sd.get(LOSS_SCALER, self.loss_scaler) self.dynamic_loss_scale = sd.get('dynamic_loss_scale', self.dynamic_loss_scale) @@ -2446,7 +2426,9 @@ def estimate_zero2_model_states_mem_needs(total_params, gpu_mem = 2 * total_params cpu_mem = total_params * max(4 * total_gpus, 16) * additional_buffer_factor else: - gpu_mem = 4 * total_params + int(16 * total_params / total_gpus) + # GPU's total_params multipliers: 2 = params_16bit, + # 14 = 2_grads_16bit + 4_params_32bit + 8_optimizer_states_32bit(momentum and variance) + gpu_mem = 2 * total_params + int(14 * total_params / total_gpus) cpu_mem = total_params * 4 * num_gpus_per_node * additional_buffer_factor return int(cpu_mem), int(gpu_mem) diff --git a/deepspeed/runtime/zero/utils.py b/deepspeed/runtime/zero/utils.py index f61715bd4387..da0efd8726cd 100755 --- a/deepspeed/runtime/zero/utils.py +++ b/deepspeed/runtime/zero/utils.py @@ -50,6 +50,13 @@ class ZeRORuntimeException(Exception): except ImportError: pass +# Add hpex FusedAdamW to supported list if hpex is installed +try: + from habana_frameworks.torch.hpex.optimizers import FusedAdamW + ZERO_SUPPORTED_OPTIMIZERS.append(FusedAdamW) +except ImportError: + pass + def is_zero_supported_optimizer(optimizer): if dist.get_rank() == 0: @@ -65,7 +72,6 @@ def get_lst_from_rank0(lst: List[int]) -> None: lst_tensor = torch.tensor( lst if dist.get_rank() == 0 else [-1] * len(lst), dtype=int, - # device=get_accelerator().current_device_name(), device=torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"])), requires_grad=False, ) diff --git a/deepspeed/tools/__init__.py b/deepspeed/tools/__init__.py new file mode 100644 index 000000000000..36d8fbe03558 --- /dev/null +++ b/deepspeed/tools/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team diff --git a/deepspeed/tools/pg_sim/__init__.py b/deepspeed/tools/pg_sim/__init__.py new file mode 100644 index 000000000000..917c5a255fa8 --- /dev/null +++ b/deepspeed/tools/pg_sim/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .pg import install_sim_dist_backend diff --git a/deepspeed/tools/pg_sim/pg.py b/deepspeed/tools/pg_sim/pg.py new file mode 100644 index 000000000000..b7e2483f86d0 --- /dev/null +++ b/deepspeed/tools/pg_sim/pg.py @@ -0,0 +1,306 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
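Note: a worked instance of the revised ZeRO-2 per-GPU memory estimate, using illustrative sizes (1B parameters across 64 GPUs):

total_params, total_gpus = 1_000_000_000, 64
# 2 bytes/param of fp16 weights replicated on every GPU, plus the partitioned
# 14 bytes/param (2 fp16 grads + 4 fp32 params + 8 fp32 optimizer states).
gpu_mem = 2 * total_params + int(14 * total_params / total_gpus)
print(gpu_mem / 2**30)  # ~2.07 GiB per GPU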
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import warnings +import torch +import torch.distributed as dist + +from datetime import timedelta +from functools import partial +from torch.distributed.constants import default_pg_timeout +from torch.distributed.distributed_c10d import (GroupMember, BroadcastOptions, AllreduceOptions, ReduceOp) +from torch.distributed.distributed_c10d import STORE_BASED_BARRIER_PREFIX # noqa +from deepspeed.accelerator import get_accelerator + + +class SimProcessGroup: + BACKEND = "sim" + DEFAULT_PG = None + WORLD_SIZE = -1 + STORE = None + + def __init__(self, rank, world_size, timeout, backend): + self.sim_rank = rank + self.pg_world_size = world_size + self.timeout = timeout + self.backend = backend + self.pg = None + self.torch_ver_major = int(torch.__version__.split('.')[0]) + self.torch_ver_minor = int(torch.__version__.split('.')[1]) + + assert self.torch_ver_major == 1, \ + f"Torch version major != 1 is not supported (version={torch.__version__})" + assert self.torch_ver_minor >= 10, \ + f"Torch version < 1.10 is not supported (version={torch.__version__})" + + if self.torch_ver_minor < 13: + warnings.warn(f"Torch version < 1.13 is not tested (version={torch.__version__})") + + # default is the first process group created + if SimProcessGroup.DEFAULT_PG is None: + SimProcessGroup.DEFAULT_PG = self + + @staticmethod + def get_dist_group_count(): + return torch.distributed.distributed_c10d._group_count + + @classmethod + def store_add_rest_of_world(cls, next_group): + group = cls.get_dist_group_count() + (1 if next_group else 0) + store_key = f"{STORE_BASED_BARRIER_PREFIX}:{group}" + cls.STORE.add(store_key, cls.WORLD_SIZE - 1) + + def _create_pg(self): + self.store_add_rest_of_world(next_group=False) + pg = dist.new_group(ranks=[0], timeout=self.timeout, backend=self.backend, pg_options=None) + return pg + + def post_create_sim_group(self): + self.pg = self._create_pg() + + @classmethod + def default_pg(cls): + assert cls.DEFAULT_PG is not None + return cls.DEFAULT_PG + + def size(self): + return self.pg_world_size + + def rank(self): + return self.sim_rank + + # ---------------------------------------------------- + # P2P + # + # P2P operations are simulated as all_reduce + # ---------------------------------------------------- + class P2PRequestObject: + """ Dummy p2p request object that is returned for p2p ops""" + + def __init__(self, src): + self.src = src + + def wait(self): + return + + def is_completed(self): + return True + + def _source_rank(self): + return self.src + + def _p2p_op(self, tensor_list, src=None): + opts = AllreduceOptions() + if self.torch_ver_minor > 10: + opts.reduceOp = ReduceOp.SUM + self.pg.allreduce(tensor_list, opts).wait() + src = src if src is not None else self.sim_rank + return SimProcessGroup.P2PRequestObject(src=src) + + def send(self, tensor_list, _group_dst_rank, _tag): + return self._p2p_op(tensor_list) + + def recv_anysource(self, tensor_list, _tag): + return self._p2p_op(tensor_list) + + def recv(self, tensor_list, src, _tag): + return self._p2p_op(tensor_list, src=src) + + # ---------------------------------------------------- + # Collectives + # + # For some collectives, it is required to shrink the + # input/output tensors_list to 1-element (world_size=1). + # also, need to make all other members of tensors_list to depend + # on the first element - to prevent incorrect graph signaling. 
+ # The logic of shrink and then copy is handled by: + # - _adjust_tensors_list_to_ws1 + # - _copy_data_from_tensor_to_tensor_list + # ---------------------------------------------------- + @staticmethod + def _to_device(tensors, device): + if isinstance(tensors, dict): + return {k: SimProcessGroup._to_device(v, device) for k, v in tensors.items()} + elif isinstance(tensors, list): + return [SimProcessGroup._to_device(v, device) for v in tensors] + elif isinstance(tensors, torch.Tensor): + return tensors.to(device) + else: + assert False, 'Unsupported tensors type' + + def broadcast(self, tensors, opts): + """ ignore opts.rootRank and override to be the source """ + opts.rootRank = self.sim_rank + tensors = self._to_device(tensors, get_accelerator().current_device_name()) + return self.pg.broadcast(tensors, opts) + + def allreduce(self, tensors, opts): + return self.pg.allreduce(tensors, opts) + + def allreduce_coalesced(self, tensors, opts): + return self.pg.allreduce_coalesced(tensors, opts) + + def reduce(self, tensors, opts): + if opts.rootRank == self.sim_rank: + return self.pg.reduce(tensors, opts) + + broadcast_opts = BroadcastOptions() + broadcast_opts.rootRank = self.sim_rank + broadcast_opts.rootTensor = opts.rootTensor + return self.pg.broadcast(tensors, broadcast_opts) + + def _adjust_tensors_list_to_ws1(self, tensors_list): + """ receives list of lists of tensors and returns lists + of list-size-1 to match the world_size=1 + """ + world1_tensors_list = [] + for i, tensors in enumerate(tensors_list): + world1_tensors_list.append(tensors[self.sim_rank:self.sim_rank + 1]) + return world1_tensors_list + + @staticmethod + def _copy_data_from_tensor_to_tensor_list(source_tensors, tensors_list): + """ copy data from source tensors to all tensors in tensor list """ + for i, tensors in enumerate(tensors_list): + for t in tensors: + t.data[:] = source_tensors[i][0].data[:] + + def allgather(self, tensors_list, input_tensors, *kwargs): + world1_tensors_list = self._adjust_tensors_list_to_ws1(tensors_list) + handle = self.pg.allgather(world1_tensors_list, input_tensors, *kwargs) + self._copy_data_from_tensor_to_tensor_list(world1_tensors_list, tensors_list) + return handle + + def gather(self, output_tensors, input_tensors, opts): + if opts.rootRank == self.sim_rank: + world1_tensors_list = self._adjust_tensors_list_to_ws1(output_tensors) + handle = self.pg.gather(world1_tensors_list, input_tensors, opts) + self._copy_data_from_tensor_to_tensor_list(world1_tensors_list, output_tensors) + return handle + + broadcast_opts = BroadcastOptions() + broadcast_opts.rootRank = self.sim_rank + return self.pg.broadcast(input_tensors, broadcast_opts) + + def scatter(self, output_tensors, input_tensors, opts): + if opts.rootRank == self.sim_rank: + world1_tensors_list = self._adjust_tensors_list_to_ws1(input_tensors) + handle = self.pg.scatter(output_tensors, world1_tensors_list, opts) + self._copy_data_from_tensor_to_tensor_list(world1_tensors_list, input_tensors) + return handle + + broadcast_opts = BroadcastOptions() + broadcast_opts.rootRank = self.sim_rank + return self.pg.broadcast(output_tensors, broadcast_opts) + + def reduce_scatter(self, output_tensors, input_tensors, opts): + world1_tensors_list = self._adjust_tensors_list_to_ws1(input_tensors) + handle = self.pg.reduce_scatter(output_tensors, world1_tensors_list, opts) + self._copy_data_from_tensor_to_tensor_list(world1_tensors_list, input_tensors) + return handle + + def alltoall(self, output_tensors, input_tensors, _opts): + 
world1_in_tensors_list = input_tensors[self.sim_rank:self.sim_rank + 1] + world1_out_tensors_list = output_tensors[self.sim_rank:self.sim_rank + 1] + world1_out_tensors_list[0].data[:] = world1_in_tensors_list[0].data[:] + opts = AllreduceOptions() + if self.torch_ver_minor > 10: + opts.reduceOp = ReduceOp.SUM + handle = self.pg.allreduce(world1_out_tensors_list, opts) + return handle + + def barrier(self, opts): + opts.device_ids = [self.sim_rank] + return self.pg.barrier(opts) + + # ---------------------------------------------------- + # Create group registered function + # ---------------------------------------------------- + @classmethod + def create(cls, _store, rank, world_size, timeout, backend): + return cls(rank, world_size, timeout, backend) + + +def install_sim_dist_backend(sim_world_size, sim_rank): + + def wrapped_dist_init_process_group(backend, + init_method=None, + timeout=default_pg_timeout, + world_size=-1, + rank=-1, + store=None, + group_name="", + pg_options=None): + assert world_size == -1 or world_size == sim_world_size, \ + f'Inconsistent world_size: sim={sim_world_size} dist_init={world_size}' + + assert rank == -1 or rank == sim_rank, \ + f'Inconsistent rank: sim={sim_rank} dist_init={rank}' + + if backend == 'hccl': + import habana_frameworks.torch.distributed.hccl # noqa: F401 + + # override provided init_method/store with a dummy store + # For debug, it is better to use FileStore: + # import os + # my_store_filename = '/tmp/my_store' + # os.remove(my_store_filename) if os.path.exists(my_store_filename) else None + # os.remove(my_store_filename) + # store = torch.distributed.FileStore(my_store_filename, world_size) + store = torch.distributed.TCPStore(host_name="localhost", + port=12355, + world_size=sim_world_size, + is_master=True, + timeout=timedelta(seconds=300), + wait_for_workers=False) + + # set the simulated world size + SimProcessGroup.WORLD_SIZE = sim_world_size + SimProcessGroup.STORE = store + + # register sim backend + # create_fn = partial(SimProcessGroup.create, backend=default_backend) + create_fn = partial(SimProcessGroup.create, backend=backend) + dist.Backend.register_backend(SimProcessGroup.BACKEND, create_fn) + + # emulate all other world devices has joined the newly created group + SimProcessGroup.store_add_rest_of_world(next_group=True) + + orig_dist_init_process_group(backend=SimProcessGroup.BACKEND, + timeout=timeout, + world_size=sim_world_size, + rank=sim_rank, + store=store, + group_name=group_name, + pg_options=pg_options) + + SimProcessGroup.default_pg().post_create_sim_group() + + def wrapped_dist_new_group(ranks=None, timeout=default_pg_timeout, backend=None, pg_options=None): + SimProcessGroup.store_add_rest_of_world(next_group=True) + pg = orig_dist_new_group(ranks=ranks, timeout=timeout, backend=backend, pg_options=pg_options) + + if pg != GroupMember.NON_GROUP_MEMBER: + if backend is None or backend == SimProcessGroup.BACKEND: + pg.post_create_sim_group() + + return pg + + def wrapped_dist_broadcast_object_list(object_list, src=0, group=None, device=None): + rank = SimProcessGroup.default_pg().sim_rank + if src != sim_rank: + raise RuntimeError(f'SimProcessGroup does not support dist.broadcast_object_list() ' + f'for src={src} different than sim_rank={rank}') + return orig_dist_broadcast_object_list(object_list, src, group, device) + + orig_dist_init_process_group = dist.init_process_group + dist.init_process_group = wrapped_dist_init_process_group + + orig_dist_new_group = dist.new_group + dist.new_group = 
wrapped_dist_new_group + + orig_dist_broadcast_object_list = dist.broadcast_object_list + dist.broadcast_object_list = wrapped_dist_broadcast_object_list diff --git a/deepspeed/tools/pg_sim/ut/base.py b/deepspeed/tools/pg_sim/ut/base.py new file mode 100644 index 000000000000..24889f944070 --- /dev/null +++ b/deepspeed/tools/pg_sim/ut/base.py @@ -0,0 +1,311 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import unittest +import functools +import torch +import torch.distributed as dist +import pytest + +from pg_sim.pg import (install_sim_dist_backend, GroupMember) + + +class TestBaseWrapper: + """ + BaseTestWrapper class ensures that the test cases encapsulated + in ProcessGroupSimTestBase will only be executed by subclasses. + """ + + class ProcessGroupSimTestBase(unittest.TestCase): + + def setUp(self) -> None: + self.world_size = 8 + self.rank = 0 + self.backend = self.get_backend() + self.device = self.get_device() + + self.assertIsNotNone(self.backend) + self.assertIsNotNone(self.device) + + install_sim_dist_backend(sim_world_size=self.world_size, sim_rank=self.rank) + + dist.init_process_group(backend=self.backend, + init_method=None, + store=None, + rank=self.rank, + world_size=self.world_size) + + def get_backend(self): + self.assertTrue(False, msg='get_backend must be implemented by derived test') + + def get_device(self): + self.assertTrue(False, msg='get_device must be implemented by derived test') + + def _get_row_first_rank(self): + row_ranks = list(set(range(self.world_size)) - {self.rank}) + return row_ranks[0] if row_ranks else None + + @staticmethod + def _get_torch_version(): + return int(torch.__version__.split('.')[1]) + + @pytest.mark.forked + def test_world(self): + res_rank = dist.get_rank() + res_ws = dist.get_world_size() + self.assertEqual(res_rank, self.rank) + self.assertEqual(res_ws, self.world_size) + + @pytest.mark.forked + def test_new_group(self): + t = torch.tensor([1, 2]).to(self.device) + t_in_out = t.clone() + + pg_1 = dist.new_group(ranks=[self.rank]) + dist.all_reduce(t_in_out, op=dist.ReduceOp.SUM, group=pg_1) + self.assertTrue(t.eq(t_in_out).all()) + + row_rank = self._get_row_first_rank() + if row_rank: + pg_2 = dist.new_group(ranks=[row_rank]) + self.assertEqual(pg_2, GroupMember.NON_GROUP_MEMBER) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + def _test_broadcast_impl(self, src): + t = torch.tensor([1, 2]).to(self.device) + handle = dist.broadcast(t, src=src, async_op=False) + self.assertIsNone(handle) + + t = torch.tensor([1, 2]).to(self.device) + handle = dist.broadcast(t, src=src, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + + @pytest.mark.forked + def test_broadcast_src(self): + self._test_broadcast_impl(src=self.rank) + + @pytest.mark.forked + def test_broadcast_dst(self): + row_rank = self._get_row_first_rank() + if row_rank: + self._test_broadcast_impl(src=row_rank) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + def _test_broadcast_object_type_impl(self, src): + if dist.get_rank() == src: + objects = ["foo", 12, {1: 2}] + else: + objects = [None, None, None] + + dev = torch.device(self.device) + dist.broadcast_object_list(objects, src=src, device=dev) + + @pytest.mark.forked + def test_broadcast_object_type_src(self): + self._test_broadcast_object_type_impl(src=self.rank) + + @pytest.mark.forked + def test_broadcast_object_type_dst(self): + row_rank = self._get_row_first_rank() 
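Note: a usage sketch for the new pg_sim tool, modelled on the gloo unit test below; it assumes a single local process and that the TCP port hard-coded by the wrapper is free:

import torch
import torch.distributed as dist
from deepspeed.tools.pg_sim import install_sim_dist_backend

install_sim_dist_backend(sim_world_size=8, sim_rank=0)
dist.init_process_group(backend='gloo', rank=0, world_size=8)

assert dist.get_world_size() == 8 and dist.get_rank() == 0
dist.all_reduce(torch.ones(2))   # executes against the single simulated rank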
+ if row_rank: + with pytest.raises(RuntimeError): + self._test_broadcast_object_type_impl(src=row_rank) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + @pytest.mark.forked + def test_all_reduce(self): + t = torch.tensor([1, 2]).to(self.device) + t_in_out = t.clone() + dist.all_reduce(t_in_out, op=dist.ReduceOp.SUM) + self.assertTrue(t.eq(t_in_out).all()) + + def _test_reduce_impl(self, dst): + t = torch.tensor([1.0, 2.0]).to(self.device) + t_in_out = t.clone() + + handle = dist.reduce(t_in_out, dst=dst, op=dist.ReduceOp.SUM, async_op=False) + self.assertIsNone(handle) + self.assertTrue(t.eq(t_in_out).all()) + + handle = dist.reduce(t_in_out, dst=dst, op=dist.ReduceOp.SUM, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + self.assertTrue(t.eq(t_in_out).all()) + + @pytest.mark.forked + def test_reduce_src(self): + self._test_reduce_impl(dst=self.rank) + + @pytest.mark.forked + def test_reduce_dst(self): + row_rank = self._get_row_first_rank() + if row_rank: + self._test_reduce_impl(dst=row_rank) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + @pytest.mark.forked + def test_all_gather(self): + tensor_list = [torch.zeros(2).to(self.device) for _ in range(self.world_size)] + tensor = torch.ones(2).to(self.device) + + handle = dist.all_gather(tensor_list, tensor, async_op=False) + self.assertIsNone(handle) + self.assertTrue(tensor_list[0].eq(tensor).all()) + + handle = dist.all_gather(tensor_list, tensor, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + self.assertTrue(tensor_list[0].eq(tensor).all()) + + def _test_gather_impl(self, dst, local_dst): + torch_version = self._get_torch_version() + if (self.backend == 'nccl') and (torch_version <= 10): + self.skipTest(f'Skipping {self._testMethodName} for nccl ' + f'for torch.version={torch_version}') + + tensor = torch.ones(2).to(self.device) + gather_list = [torch.zeros(2).to(self.device) for _ in range(self.world_size)] if local_dst else None + + handle = dist.gather(tensor, gather_list, dst=dst, async_op=False) + self.assertIsNone(handle) + if local_dst: + self.assertTrue(gather_list[dst].eq(tensor).all()) + + handle = dist.gather(tensor, gather_list, dst=dst, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + if local_dst: + self.assertTrue(gather_list[dst].eq(tensor).all()) + + @pytest.mark.forked + def test_gather_src(self): + self._test_gather_impl(dst=self.rank, local_dst=True) + + @pytest.mark.forked + def test_gather_not_src(self): + row_rank = self._get_row_first_rank() + if row_rank: + self._test_gather_impl(dst=row_rank, local_dst=False) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + def _test_scatter_impl(self, src, local_src): + if self.backend not in ('gloo', 'mpi'): + self.skipTest(f'Skipping {self._testMethodName} for {self.backend}') + + tensor = torch.ones(2).to(self.device) + scatter_list = [torch.zeros(2).to(self.device) for _ in range(self.world_size)] if local_src else None + + handle = dist.scatter(tensor, scatter_list, src=src, async_op=False) + self.assertIsNone(handle) + if local_src: + self.assertTrue(scatter_list[src].eq(tensor).all()) + + handle = dist.scatter(tensor, scatter_list, src=src, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + if local_src: + self.assertTrue(scatter_list[src].eq(tensor).all()) + + @pytest.mark.forked + def test_scatter_src(self): + self._test_scatter_impl(src=self.rank, local_src=True) + + @pytest.mark.forked + def test_scatter_not_src(self): + row_rank = self._get_row_first_rank() + 
if row_rank: + self._test_scatter_impl(src=row_rank, local_src=False) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + @pytest.mark.forked + def test_reduce_scatter(self): + if self.backend not in ('nccl', 'hccl'): + self.skipTest(f'Skipping {self._testMethodName} for {self.backend}') + + output = torch.ones(2).to(self.device) + input_list = [torch.zeros(2).to(self.device) for _ in range(self.world_size)] + + handle = dist.reduce_scatter(output, input_list, async_op=False) + self.assertIsNone(handle) + self.assertTrue(input_list[self.rank].eq(output).all()) + + handle = dist.reduce_scatter(output, input_list, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + self.assertTrue(input_list[self.rank].eq(output).all()) + + @pytest.mark.forked + def test_all_to_all(self): + if self.backend not in ('nccl', 'hccl', 'mpi'): + self.skipTest(f'Skipping {self._testMethodName} for {self.backend}') + + output_list = [torch.zeros(1).to(self.device) for _ in range(self.world_size)] + input_list = list( + torch.arange(self.world_size, dtype=torch.float32).add(1.).to(self.device).chunk(self.world_size)) + + expected_res = [ + torch.zeros(1).to(self.device) if i != self.rank else torch.ones(1).to(self.device) + for i in range(self.world_size) + ] + + handle = dist.all_to_all(output_list, input_list, async_op=False) + self.assertIsNone(handle) + self.assertTrue( + functools.reduce(lambda x, y: x and y, map(lambda p, q: p == q, expected_res, output_list), True)) + + handle = dist.all_to_all(output_list, input_list, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + self.assertTrue( + functools.reduce(lambda x, y: x and y, map(lambda p, q: p == q, expected_res, output_list), True)) + + @pytest.mark.forked + def test_barrier(self): + handle = dist.barrier(async_op=False) + self.assertIsNone(handle) + + handle = dist.barrier(async_op=True) + self.assertIsNotNone(handle) + handle.wait() + + @pytest.mark.forked + def test_p2p_send(self): + tensor = torch.ones(2).to(self.device) + dist.send(tensor, dst=self.rank, group=None, tag=0) + + row_rank = self._get_row_first_rank() + dist.send(tensor, dst=row_rank, group=None, tag=0) if row_rank else None + + handle = dist.isend(tensor, dst=self.rank, group=None, tag=0) + self.assertIsNotNone(handle) + handle.wait() + + handle = dist.isend(tensor, dst=row_rank, group=None, tag=0) + self.assertIsNotNone(handle) + handle.wait() + + @pytest.mark.forked + def test_p2p_recv(self): + tensor = torch.zeros(2).to(self.device) + dist.recv(tensor, src=self.rank, group=None, tag=0) + + row_rank = self._get_row_first_rank() + dist.recv(tensor, src=row_rank, group=None, tag=0) if row_rank else None + + handle = dist.irecv(tensor, src=self.rank, group=None, tag=0) + self.assertIsNotNone(handle) + handle.wait() + + handle = dist.irecv(tensor, src=row_rank, group=None, tag=0) + self.assertIsNotNone(handle) + handle.wait() diff --git a/deepspeed/tools/pg_sim/ut/gloo.py b/deepspeed/tools/pg_sim/ut/gloo.py new file mode 100644 index 000000000000..347f9b96a741 --- /dev/null +++ b/deepspeed/tools/pg_sim/ut/gloo.py @@ -0,0 +1,24 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import unittest +from pg_sim.ut.base import TestBaseWrapper + + +class ProcessGroupSimTestGloo(TestBaseWrapper.ProcessGroupSimTestBase): + + def setUp(self) -> None: + super(ProcessGroupSimTestGloo, self).setUp() + + def get_backend(self): + return 'gloo' + + def get_device(self): + return 'cpu' + + +if __name__ == '__main__': + unittest.main() diff --git a/deepspeed/tools/pg_sim/ut/hccl.py b/deepspeed/tools/pg_sim/ut/hccl.py new file mode 100644 index 000000000000..37fabcfb9363 --- /dev/null +++ b/deepspeed/tools/pg_sim/ut/hccl.py @@ -0,0 +1,24 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import unittest +from pg_sim.ut.base import TestBaseWrapper + + +class ProcessGroupSimTestHccl(TestBaseWrapper.ProcessGroupSimTestBase): + + def setUp(self) -> None: + super(ProcessGroupSimTestHccl, self).setUp() + + def get_backend(self): + return 'hccl' + + def get_device(self): + return 'hpu' + + +if __name__ == '__main__': + unittest.main() diff --git a/deepspeed/tools/pg_sim/ut/nccl.py b/deepspeed/tools/pg_sim/ut/nccl.py new file mode 100644 index 000000000000..da6b8f23ed47 --- /dev/null +++ b/deepspeed/tools/pg_sim/ut/nccl.py @@ -0,0 +1,24 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import unittest +from pg_sim.ut.base import TestBaseWrapper + + +class ProcessGroupSimTestNccl(TestBaseWrapper.ProcessGroupSimTestBase): + + def setUp(self) -> None: + super(ProcessGroupSimTestNccl, self).setUp() + + def get_backend(self): + return 'nccl' + + def get_device(self): + return 'cuda' + + +if __name__ == '__main__': + unittest.main() diff --git a/deepspeed/tools/tensor_logger/__init__.py b/deepspeed/tools/tensor_logger/__init__.py new file mode 100644 index 000000000000..55ea7227e48d --- /dev/null +++ b/deepspeed/tools/tensor_logger/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .tensor_logger import TensorLogger, save_logged_tensors diff --git a/deepspeed/tools/tensor_logger/tensor_logger.py b/deepspeed/tools/tensor_logger/tensor_logger.py new file mode 100644 index 000000000000..f128b9a6b0a0 --- /dev/null +++ b/deepspeed/tools/tensor_logger/tensor_logger.py @@ -0,0 +1,234 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import collections +from functools import partial +from contextlib import contextmanager +from torch.functional import Tensor +from os import makedirs +from os.path import join + + +class TensorLogger: + """ Records nn.Module's activations and gradients tensors + + Description: + Records up to end_iteration(if 0, recording is disabled) from start_iteration. + If log_activations_enabled, nn.Module's activations are recorded during forward. + If log_grads_enabled, nn.Module's gradients are recorded during back propagation. + If log_inputs_enabled, model inputs are recorded. 
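+        For example, TensorLogger(model, start_iteration=1, end_iteration=3) records
+        iterations 1 through 3 (inclusive); iterations outside that range are ignored.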
+ + Usage: + Integrated within the training loop: + tensor_logger = TensorLogger(model, start_iteration=2, end_iteration=2) + # dumps second iteration only, iteration number starts with 1 + + for i, samples in enumerate(data_loader) # training loop + with tensor_logger.log_iteration(i): + # run forward/backward iteration + + tensor_logger.save(filename) + + Another alternative: + tensor_logger = TensorLogger(model, end_iteration=2) + + for i, samples in enumerate(data_loader) # training loop + with tensor_logger: + tensor_logger.set_iteration(i) + # run forward/backward iteration + + tensor_logger.save(filename) + + Implementation notes: + forward/backward activations/gradients are collected using nn.Module hooks. + However, model inputs are collected by overloading model.forward() method. + Model inputs can't be collected using the hooks since the hooks only provide + inputs and do not provide kwargs, if exist, of the forward method. + """ + + def __init__(self, + model, + start_iteration=0, + end_iteration=0, + log_activations_enabled=False, + log_grads_enabled=False, + log_inputs_enabled=False, + prefix=None): + + # for now, no support for virtual pipeline (interleaved) + if isinstance(model, list): + assert len(model) == 1, 'No support for list of multiple models (len={})'.format(len(model)) + model = model[0] + + self.model = model + self.start_iteration = start_iteration + self.end_iteration = end_iteration + self.log_activations_enabled = log_activations_enabled + self.log_grads_enabled = log_grads_enabled + self.log_inputs_enabled = log_inputs_enabled + self.prefix = 'model' if prefix is None else prefix + + # captured tensors are saved in the following hierarchy: + # { + # iteration: { # iteration number + # tensor_type: { # fwd_act/bwd_grad_in/bwd_grad_out + # name: [tensors] # tensor name's tensors. list is required due to e.g. 
grad accumulation + # } + # } + # } + class IterData(dict): + + def __init__(self): + super(IterData, self).__init__() + self['fwd_act'] = collections.defaultdict(list) + self['bwd_grad_in'] = collections.defaultdict(list) + self['bwd_grad_out'] = collections.defaultdict(list) + self['model_inputs'] = collections.defaultdict(list) + + self.data = collections.defaultdict(IterData) + self.active = False + self.current_iteration = 0 + self.fwd_handles = [] + self.bwd_handles = [] + + def _fqn(self, name): + return '.'.join([self.prefix, name]) if name else self.prefix + + def set_iteration(self, iteration): + self.current_iteration = iteration + + def get_num_recorded_iterations(self): + return len(self.data) + + @contextmanager + def log_iteration(self, iteration): + self.current_iteration = iteration + self._enable() + yield self + self._disable() + + def __enter__(self): + self._enable() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self._disable() + + def clear(self): + self.data.clear() + + def save(self, filename, do_clear=True): + + def convert_for_pickle(obj): + if isinstance(obj, dict): + return {k: convert_for_pickle(v) for k, v in obj.items()} + elif isinstance(obj, list): + return [convert_for_pickle(e) for e in obj] + elif isinstance(obj, tuple): + return tuple([convert_for_pickle(e) for e in obj]) + else: + if isinstance(obj, Tensor): + return obj.detach().cpu() + else: + return obj + + data = convert_for_pickle(self.data) + torch.save(data, filename) + self.clear() if do_clear else None + + def _enable(self): + if not self.active and self.start_iteration <= self.current_iteration <= self.end_iteration: + self.active = True + self._enable_log_grads() if self.log_grads_enabled else None + self._enable_log_activations() if self.log_activations_enabled else None + self._enable_log_inputs() if self.log_inputs_enabled else None + + def _disable(self): + if self.active: + self.active = False + self._disable_log_grads() + self._disable_log_activations() + self._disable_log_inputs() + + @staticmethod + def _extract_tensors(t): + if t is None: + return None + elif isinstance(t, int): + return torch.tensor(t) + elif isinstance(t, torch.Tensor): + return t.detach().contiguous() + elif isinstance(t, list): + return [TensorLogger._extract_tensors(e) for e in t] + elif isinstance(t, tuple): + return tuple(TensorLogger._extract_tensors(e) for e in t) + elif isinstance(t, dict): + return {k: TensorLogger._extract_tensors(v) for k, v in t.items()} + assert False, 'Unsupported type: {}'.format(type(t)) + + def _save_fwd_activation(self, name, _mod, _inp, out): + fwd_act = self._extract_tensors(out) + self.data[self.current_iteration]['fwd_act'][name].append(fwd_act) + + def _save_bwd_grads(self, name, _mod, grad_input, grad_output): + grad_in = self._extract_tensors(grad_input) + grad_out = self._extract_tensors(grad_output) + self.data[self.current_iteration]['bwd_grad_in'][name].append(grad_in) + self.data[self.current_iteration]['bwd_grad_out'][name].append(grad_out) + + def _save_inputs(self, *inp, **kwargs): + model_inputs = self._extract_tensors(inp) + model_kwargs = self._extract_tensors(kwargs) + self.data[self.current_iteration]['model_inputs']['inputs'].append(model_inputs) + self.data[self.current_iteration]['model_inputs']['kwargs'].append(model_kwargs) + + def _enable_log_grads(self): + #Revert after [SW-69765] is fixed + full_bwd_hook_supported = False + for name, m in self.model.named_modules(): + register_fn = m.register_full_backward_hook if full_bwd_hook_supported else 
m.register_backward_hook + h = register_fn(partial(self._save_bwd_grads, self._fqn(name))) + self.bwd_handles.append(h) + + def _enable_log_activations(self): + for name, m in self.model.named_modules(): + h = m.register_forward_hook(partial(self._save_fwd_activation, self._fqn(name))) + self.fwd_handles.append(h) + + def _enable_log_inputs(self): + + def wrapped_forward(*inputs, **kwargs): + self._save_inputs(*inputs, **kwargs) + return self.model.original_forward__(*inputs, **kwargs) + + self.model.original_forward__ = self.model.forward + self.model.forward = wrapped_forward + + def _disable_log_grads(self): + for h in self.bwd_handles: + h.remove() + self.bwd_handles = [] + + def _disable_log_activations(self): + for h in self.fwd_handles: + h.remove() + self.fwd_handles = [] + + def _disable_log_inputs(self): + if hasattr(self.model, 'original_forward__'): + self.model.forward = self.model.original_forward__ + del self.model.original_forward__ + + +def save_logged_tensors(tensor_logger: TensorLogger, tensor_logger_path, rank_no, iteration=None): + if tensor_logger.get_num_recorded_iterations(): + makedirs(tensor_logger_path, exist_ok=True) + filename = 'tensor_logger_rank_{}'.format(rank_no) + '.pt' + if iteration is not None: + filename = 'tensor_logger_rank_{}_iter_{}'.format(rank_no, iteration) + '.pt' + fullname = join(tensor_logger_path, filename) + tensor_logger.save(fullname) diff --git a/deepspeed/utils/__init__.py b/deepspeed/utils/__init__.py index 33ea8ba60818..75fb6aa9d30a 100644 --- a/deepspeed/utils/__init__.py +++ b/deepspeed/utils/__init__.py @@ -10,7 +10,7 @@ from .groups import * from .nvtx import instrument_w_nvtx # TODO: Move tensor fragment and mixed precision to zero utils -from .tensor_fragment import tensor_fragment, get_full_hp_param, get_hp_fragment_mapping, fragment_address, get_full_hp_grad +from .tensor_fragment import tensor_fragment, get_full_hp_param, get_hp_fragment_mapping, fragment_address, get_full_hp_grad, map_to_flat_opt_states from .tensor_fragment import safe_get_full_fp32_param, safe_get_full_grad, safe_get_full_optimizer_state from .tensor_fragment import set_full_hp_param from .tensor_fragment import safe_set_full_fp32_param, safe_set_full_optimizer_state diff --git a/deepspeed/utils/bwc.py b/deepspeed/utils/bwc.py new file mode 100644 index 000000000000..69fcc251a684 --- /dev/null +++ b/deepspeed/utils/bwc.py @@ -0,0 +1,104 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + + +def bwc_tensor_model_parallel_rank(mpu=None): + """Backwards-compatible way of querying the tensor model parallel rank from + an ``mpu`` object. + + *Tensor* model parallelism means that tensors are physically split across + processes. This contrasts with *pipeline* model parallelism, in which the + layers are partitioned but tensors left intact. + + The API for tensor model parallelism has changed across versions and this + helper provides a best-effort implementation across versions of ``mpu`` + objects. The preferred mechanism is + ``mpu.get_tensor_model_parallel_rank()``. + + This should "just work" with both Megatron-LM and DeepSpeed's pipeline + parallelism. + + Args: + mpu (model parallel unit, optional): The tensor model parallel rank. + If ``mpu=None``, returns 0. Defaults to ``None``. 
+ + Returns: + int: the rank + """ + if mpu is None: + # No model parallelism in easy :) + return 0 + + if hasattr(mpu, 'get_tensor_model_parallel_rank'): + # New Megatron and DeepSpeed convention (post pipeline-parallelism release) + return mpu.get_tensor_model_parallel_rank() + elif hasattr(mpu, 'get_slice_parallel_rank'): + # Some DeepSpeed + pipeline parallelism versions + return mpu.get_slice_parallel_rank() + else: + # Deprecated Megatron and DeepSpeed convention + return mpu.get_model_parallel_rank() + + +def bwc_tensor_model_parallel_world_size(mpu=None): + """Backwards-compatible way of querying the tensor model parallel world size. + Similar to bwc_tensor_model_parallel_rank. + """ + if mpu is None: + return 1 + + if hasattr(mpu, 'get_tensor_model_parallel_world_size'): + # New Megatron and DeepSpeed convention (post pipeline-parallelism release) + return mpu.get_tensor_model_parallel_world_size() + elif hasattr(mpu, 'get_slice_parallel_world_size'): + # Some DeepSpeed + pipeline parallelism versions + return mpu.get_slice_parallel_world_size() + else: + # Deprecated Megatron and DeepSpeed convention + return mpu.get_model_parallel_world_size() + + +def bwc_tensor_model_parallel_group(mpu=None): + """Backwards-compatible way of querying the tensor model parallel group. + Similar to bwc_tensor_model_parallel_rank. + """ + if mpu is None: + return None + + if hasattr(mpu, 'get_tensor_model_parallel_group'): + # New Megatron and DeepSpeed convention (post pipeline-parallelism release) + return mpu.get_tensor_model_parallel_group() + elif hasattr(mpu, 'get_slice_parallel_group'): + # Some DeepSpeed + pipeline parallelism versions + return mpu.get_slice_parallel_group() + else: + # Deprecated Megatron and DeepSpeed convention + return mpu.get_model_parallel_group() + + +def bwc_pipeline_parallel_world_size(mpu=None): + """Backwards-compatible way of querying the pipeline parallel world size.""" + world_size = 1 + if mpu is not None: + if hasattr(mpu, 'get_pipeline_model_parallel_world_size'): + # New Megatron and DeepSpeed convention (post pipeline-parallelism release) + world_size = mpu.get_pipeline_model_parallel_world_size() + elif hasattr(mpu, 'get_pipe_parallel_world_size'): + # DeepSpeed Topology + world_size = mpu.get_pipe_parallel_world_size() + return world_size + + +def bwc_pipeline_parallel_group(mpu=None): + """Backwards-compatible way of querying the pipeline parallel group.""" + if mpu is None: + return None + if hasattr(mpu, 'get_pipeline_model_parallel_group'): + # Megatron + return mpu.get_pipeline_model_parallel_group() + elif hasattr(mpu, 'get_pipe_parallel_group'): + # DeepSpeed Topology + return mpu.get_pipe_parallel_group() + assert False, 'mpu does not support pipeline parallel group' diff --git a/deepspeed/utils/groups.py b/deepspeed/utils/groups.py index 63dda7f5aaae..c49f4520e16e 100644 --- a/deepspeed/utils/groups.py +++ b/deepspeed/utils/groups.py @@ -27,6 +27,7 @@ from deepspeed import comm as dist from deepspeed.utils import log_dist +from deepspeed.utils.bwc import bwc_tensor_model_parallel_world_size, bwc_pipeline_parallel_world_size from deepspeed.utils.exceptions import DeprecatedException from deepspeed.accelerator import get_accelerator # Expert parallel group that the current rank belongs to. 
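The bwc_* helpers above resolve the right accessor by duck-typing the mpu object, so callers such as groups.py do not need to know which Megatron/DeepSpeed generation they were handed. A minimal sketch of that fallback order (OldStyleMPU is a hypothetical legacy object that exposes only the deprecated accessors; the import assumes this branch's deepspeed/utils/bwc.py is installed):

from deepspeed.utils.bwc import bwc_tensor_model_parallel_rank, bwc_tensor_model_parallel_world_size


class OldStyleMPU:
    # No get_tensor_model_parallel_* and no get_slice_parallel_* methods,
    # so the helpers fall through to the deprecated get_model_parallel_* API.
    def get_model_parallel_rank(self):
        return 3

    def get_model_parallel_world_size(self):
        return 4


assert bwc_tensor_model_parallel_rank(None) == 0              # no mpu -> rank 0
assert bwc_tensor_model_parallel_rank(OldStyleMPU()) == 3     # deprecated accessor path
assert bwc_tensor_model_parallel_world_size(OldStyleMPU()) == 4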
@@ -128,31 +129,32 @@ def _create_expert_and_data_parallel(expert_parallel_size_, use_data_before_expe log_dist(f'Creating expert and data parallel groups with size {expert_parallel_size_}', ranks=[0]) world_size = dist.get_world_size() + pp_world_size = 1 if mpu is None else bwc_pipeline_parallel_world_size(mpu) rank = dist.get_rank() - _ensure_divisibility(world_size, expert_parallel_size_) + pp_stride = world_size // pp_world_size + _ensure_divisibility(pp_stride, expert_parallel_size_) group_name = f"ep_size_{expert_parallel_size_}" # Build the expert data parallel groups. global _EXPERT_DATA_PARALLEL_GROUP - ep_stride = world_size // expert_parallel_size_ + ep_stride = pp_stride // expert_parallel_size_ # Only create group if it does not already exist if group_name not in _EXPERT_DATA_PARALLEL_GROUP: - for i in range(expert_parallel_size_): - if use_data_before_expert_parallel_: - ranks = range(i * ep_stride, (i + 1) * ep_stride) - else: - ranks = range(i, world_size, expert_parallel_size_) - group = dist.new_group(ranks) - log_dist(f'Creating expert data parallel process group named {group_name} with ranks: {list(ranks)}', [0]) - if use_data_before_expert_parallel_: - if i == (rank // ep_stride): - _EXPERT_DATA_PARALLEL_GROUP[group_name] = group - else: - if i == (rank % expert_parallel_size_): + for pp_stage_start in range(0, world_size, pp_stride): + for i in range(expert_parallel_size_): + if use_data_before_expert_parallel_: + ranks = range(pp_stage_start + i * ep_stride, pp_stage_start + (i + 1) * ep_stride) + else: + ranks = range(pp_stage_start + i, pp_stage_start + pp_stride, expert_parallel_size_) + group = dist.new_group(ranks) + log_dist( + f'Creating expert data parallel process group named {group_name} ' + f'with ranks: {list(ranks)}', [0]) + if rank in ranks: _EXPERT_DATA_PARALLEL_GROUP[group_name] = group # Build the expert parallel groups. @@ -161,24 +163,29 @@ def _create_expert_and_data_parallel(expert_parallel_size_, use_data_before_expe # Only create group if it does not already exist if group_name not in _EXPERT_PARALLEL_GROUP: if use_data_before_expert_parallel_: - for i in range(ep_stride): - ranks = range(i, world_size, ep_stride) - group = dist.new_group(ranks) - log_dist(f'creating expert parallel process group named {group_name} with ranks: {list(ranks)}', [0]) - if i == (rank % ep_stride): - _EXPERT_PARALLEL_GROUP[group_name] = group + for pp_stage_start in range(0, world_size, pp_stride): + for i in range(ep_stride): + ranks = range(pp_stage_start + i, pp_stage_start + pp_stride, ep_stride) + group = dist.new_group(ranks) + log_dist( + f'creating expert parallel process group named {group_name} ' + f'with ranks: {list(ranks)}', [0]) + if rank in ranks: + _EXPERT_PARALLEL_GROUP[group_name] = group else: for i in range(world_size // expert_parallel_size_): ranks = range(i * expert_parallel_size_, (i + 1) * expert_parallel_size_) group = dist.new_group(ranks) - log_dist(f'creating expert parallel process group named {group_name} with ranks: {list(ranks)}', [0]) - if i == (rank // expert_parallel_size_): + log_dist(f'creating expert parallel process group named {group_name} ' + f'with ranks: {list(ranks)}', [0]) + if rank in ranks: _EXPERT_PARALLEL_GROUP[group_name] = group def _get_expert_parallel_ranks(world_size, - model_parallel_size_, + tensor_parallel_size_, expert_parallel_size_, + pipeline_parallel_size_=1, use_data_before_expert_parallel_=False): """Generate expert parallel and expert data parallel group ranks list. 
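The pipeline-aware grouping in the hunk above is easiest to see with concrete sizes. A minimal standalone sketch (assumed sizes: world_size=8, two pipeline stages, expert_parallel_size_=2; it only reproduces the rank arithmetic of the E + D branch and creates no process groups):

world_size = 8
pp_world_size = 2
expert_parallel_size_ = 2

pp_stride = world_size // pp_world_size  # ranks per pipeline stage -> 4

expert_data_groups = []
for pp_stage_start in range(0, world_size, pp_stride):
    for i in range(expert_parallel_size_):
        # same range() expression as the use_data_before_expert_parallel_=False branch
        expert_data_groups.append(
            list(range(pp_stage_start + i, pp_stage_start + pp_stride, expert_parallel_size_)))

print(expert_data_groups)  # [[0, 2], [1, 3], [4, 6], [5, 7]]

Every rank appears in exactly one group and no group crosses a pipeline stage, which is what the new `if rank in ranks` selection relies on.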
@@ -193,32 +200,40 @@ def _get_expert_parallel_ranks(world_size, Args: world_size (int): Distributed world size. - model_parallel_size_ (int): Model parallel group size. + tensor_parallel_size_ (int): Tensor parallel group size. expert_parallel_size_ (int): Expert parallel group size. + pipeline_parallel_size_ (int): Pipeline parallel group size use_data_before_expert_parallel_ (bool): Use the D + E instead of E + D topology Returns: Expert parallel group ranks and Expert data parallel group ranks list. """ - _ensure_divisibility(world_size, model_parallel_size_) - dp_world_size = world_size // model_parallel_size_ + _ensure_divisibility(world_size, tensor_parallel_size_ * pipeline_parallel_size_) + dp_world_size = world_size // (tensor_parallel_size_ * pipeline_parallel_size_) _ensure_divisibility(dp_world_size, expert_parallel_size_) # Generate data parallel groups data_parallel_groups = [] - dp_group_size = model_parallel_size_ + dp_group_size = tensor_parallel_size_ + pp_stride = world_size // pipeline_parallel_size_ if use_data_before_expert_parallel_: - dp_stride = world_size // expert_parallel_size_ // model_parallel_size_ - for i in range(dp_group_size): - data_parallel_groups.append(list()) - for ds in range(dp_stride): - # [0, 4, 8, 12, 16, 20, 24, 28, 2, 6, 10, 14, 18, 22, 26, 30] - # [1, 5, 9, 13, 17, 21, 25, 29, 3, 7, 11, 15, 19, 23, 27, 31] - data_parallel_groups[-1].extend( - list(range(i + ds * model_parallel_size_, world_size, dp_stride * model_parallel_size_))) + dp_stride = world_size // expert_parallel_size_ // tensor_parallel_size_ // pipeline_parallel_size_ + for pp_stage_start in range(0, world_size, pp_stride): + pp_stage_next = pp_stage_start + pp_stride + for i in range(dp_group_size): + data_parallel_groups.append(list()) + for ds in range(dp_stride): + # [0, 4, 8, 12, 16, 20, 24, 28, 2, 6, 10, 14, 18, 22, 26, 30] + # [1, 5, 9, 13, 17, 21, 25, 29, 3, 7, 11, 15, 19, 23, 27, 31] + data_parallel_groups[-1].extend( + list( + range(pp_stage_start + i + ds * tensor_parallel_size_, pp_stage_next, + dp_stride * tensor_parallel_size_))) else: - for i in range(dp_group_size): - data_parallel_groups.append(list(range(i, world_size, dp_group_size))) + for pp_stage_start in range(0, world_size, pp_stride): + pp_stage_next = pp_stage_start + pp_stride + for i in range(dp_group_size): + data_parallel_groups.append(list(range(pp_stage_start + i, pp_stage_next, dp_group_size))) expert_parallel_groups = [] expert_data_parallel_groups = [] @@ -252,36 +267,33 @@ def _create_expert_data_and_model_parallel(expert_parallel_size_, mpu, use_data_ expert_data_parallel_group = [0,8],[2,10],[4,12],[6,14], [1,9],[3,11],[5,13],[7,15] """ assert dist.is_initialized(), "dist is not initialized" - model_parallel_size_ = mpu.get_model_parallel_world_size() + tensor_parallel_size_ = bwc_tensor_model_parallel_world_size(mpu) global expert_tensor_parallel_world_size - expert_tensor_parallel_world_size = model_parallel_size_ + expert_tensor_parallel_world_size = tensor_parallel_size_ world_size = dist.get_world_size() rank = dist.get_rank() dp_world_size = mpu.get_data_parallel_world_size() - dp_rank = mpu.get_data_parallel_rank() + pp_world_size = 1 if mpu is None else bwc_pipeline_parallel_world_size(mpu) - _ensure_divisibility(world_size, model_parallel_size_) + _ensure_divisibility(world_size, tensor_parallel_size_) _ensure_divisibility(dp_world_size, expert_parallel_size_) log_dist( - f"Creating deepspeed groups with model parallel size {model_parallel_size_}, expert parallel size 
{expert_parallel_size_}, world size {world_size}, dp world size {dp_world_size}", - [0]) + f"Creating deepspeed groups with model parallel size {tensor_parallel_size_}, " + f"pipeline parallel size {pp_world_size}, expert parallel size {expert_parallel_size_}, " + f"world size {world_size}, dp world size {dp_world_size}", [0]) global _EXPERT_PARALLEL_GROUP, _EXPERT_DATA_PARALLEL_GROUP - # Get world size and rank. Ensure some consistencies. - _DATA_PARALLEL_GROUP = mpu.get_data_parallel_group() - _MODEL_PARALLEL_GROUP = mpu.get_model_parallel_group() - group_name = f"ep_size_{expert_parallel_size_}" # Only create groups if they don't already exist # Need to check conditions outside the group creation loop because of the way torch.dist group creation works if group_name not in _EXPERT_DATA_PARALLEL_GROUP and group_name not in _EXPERT_PARALLEL_GROUP: expert_parallel_groups, expert_data_parallel_groups = _get_expert_parallel_ranks( - world_size, model_parallel_size_, expert_parallel_size_, use_data_before_expert_parallel_) + world_size, tensor_parallel_size_, expert_parallel_size_, pp_world_size, use_data_before_expert_parallel_) for ranks in expert_parallel_groups: group = dist.new_group(ranks) if rank in list(ranks): diff --git a/deepspeed/utils/tensor_fragment.py b/deepspeed/utils/tensor_fragment.py index 49eefafcfbcc..b34722580ddd 100644 --- a/deepspeed/utils/tensor_fragment.py +++ b/deepspeed/utils/tensor_fragment.py @@ -58,6 +58,21 @@ def get_hp_fragment(self, optim_state_key=None): return self.get_optim_state_fragment(optim_state_key) +def map_to_flat_opt_states(flat_hp_tensor, lp_tensors, optim_state, opt_keys): + for key in opt_keys: + hp_param = flat_hp_tensor + buffer = torch.zeros_like(hp_param) + + for lp in lp_tensors: + if lp._hp_mapping is not None: + hp_fragment_address = lp._hp_mapping.get_hp_fragment_address() + hp_fragment = buffer.narrow(0, hp_fragment_address.start, hp_fragment_address.numel) + hp_fragment.data.copy_(lp._hp_mapping.get_hp_fragment(optim_state_key=key).data) + lp._hp_mapping.hp_fragment = hp_fragment + + optim_state[hp_param][key] = buffer + + def get_full_hp_param(self, optim_state_key=None): reduce_buffer = torch.zeros_like(self, dtype=torch.float32).flatten() if self._hp_mapping is not None: diff --git a/deepspeed/utils/timer.py b/deepspeed/utils/timer.py index 11ef54fe4665..23139e84385b 100755 --- a/deepspeed/utils/timer.py +++ b/deepspeed/utils/timer.py @@ -237,7 +237,8 @@ def start(self): self._init_timer() self.started = True if self.global_step_count >= self.start_step: - get_accelerator().synchronize() + if not get_accelerator().device_name() == 'hpu': + get_accelerator().synchronize() self.start_time = time.time() def stop(self, global_step=False, report_speed=True): @@ -249,13 +250,15 @@ def stop(self, global_step=False, report_speed=True): self.global_step_count += 1 if self.start_time > 0: - get_accelerator().synchronize() + if not get_accelerator().device_name() == 'hpu': + get_accelerator().synchronize() self.end_time = time.time() duration = self.end_time - self.start_time self.total_elapsed_time += duration self.step_elapsed_time += duration if global_step: + #curr_samples_sec = (self.batch_size * self.num_workers) / duration if report_speed and self.global_step_count % self.steps_per_output == 0: self.logging( "epoch={}/micro_step={}/global_step={}, RunningAvgSamplesPerSec={}, CurrSamplesPerSec={}, " diff --git a/op_builder/hpu/__init__.py b/op_builder/hpu/__init__.py index 6527ace087b5..5ad1b9a7f891 100644 --- 
a/op_builder/hpu/__init__.py +++ b/op_builder/hpu/__init__.py @@ -7,4 +7,5 @@ from .cpu_adam import CPUAdamBuilder from .fused_adam import FusedAdamBuilder +from .transformer_inference import InferenceBuilder from .no_impl import NotImplementedBuilder diff --git a/op_builder/hpu/fused_adam.py b/op_builder/hpu/fused_adam.py index d77228317ddb..8caff5098b35 100644 --- a/op_builder/hpu/fused_adam.py +++ b/op_builder/hpu/fused_adam.py @@ -4,10 +4,85 @@ # DeepSpeed Team -from .builder import CPUOpBuilder +try: + # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed + # if successful this also means we're doing a local install and not JIT compile path + from op_builder import __deepspeed__ # noqa: F401 # type: ignore + from op_builder.builder import OpBuilder +except ImportError: + from deepspeed.ops.op_builder.builder import OpBuilder +import torch +import math -class FusedAdamBuilder(CPUOpBuilder): + +class HPUFusedAdam: + htcore = None + is_lazy_mode = None + + @staticmethod + def multi_tensor_adam(chunk_size, noop_flag_buffer, tensor_lists, lr, beta1, beta2, epsilon, step, adam_w_mode, + bias_correction, weight_decay, *args): + + if HPUFusedAdam.htcore is None: + from habana_frameworks.torch import core as htcore + from habana_frameworks.torch.utils.internal import is_lazy + HPUFusedAdam.htcore = htcore + HPUFusedAdam.is_lazy_mode = is_lazy() + + htcore = HPUFusedAdam.htcore + + htcore.step_closure._mark_step_if_lazy() + step_size = lr + if bias_correction: + bias_correction1 = 1.0 - pow(beta1, step) + bias_correction2 = 1.0 - pow(beta2, step) + step_size = step_size * math.sqrt(bias_correction2) / bias_correction1 + + neg_step = -step_size + neg_step_t = (torch.tensor([neg_step], dtype=torch.float, + requires_grad=False).to(tensor_lists[1][0].dtype).to(tensor_lists[1][0].device, + non_blocking=True)) + + weight_decay = weight_decay if adam_w_mode else 0 + + # since lr is fed into the kernel as tensor, perform the scalar multiplication of wd here + # NOTE: TODO if lr is updated every step, then we need to convert it as tensor and + # perform weight decay unconditonally. 
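+        # Example with hypothetical values: lr=1e-3, weight_decay=0.01 gives
+        # modified_wd = 1 - 1e-5 = 0.99999, i.e. the kernel applies decoupled
+        # (AdamW-style) weight decay by scaling the weights with this factor.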
+ modified_wd = 1.0 - weight_decay * lr + + if HPUFusedAdam.is_lazy_mode: + torch.ops.hpu.optimizer_adamw( + tensor_lists[0], + tensor_lists[1], + tensor_lists[2], + tensor_lists[3], + neg_step_t, + beta1, + beta2, + epsilon, + modified_wd, + ) + else: + modified_wd_t = (torch.tensor([modified_wd], dtype=torch.float, requires_grad=False).to( + tensor_lists[1][0].dtype).to(tensor_lists[1][0].device, non_blocking=True)) + torch.ops.hpu.optimizer_adamw( + tensor_lists[0], + tensor_lists[1], + tensor_lists[2], + tensor_lists[3], + neg_step_t, + beta1, + beta2, + epsilon, + modified_wd_t, + modified_wd != 1.0, + ) + + htcore.step_closure._mark_step_if_lazy() + + +class FusedAdamBuilder(OpBuilder): BUILD_VAR = "DS_BUILD_FUSED_ADAM" NAME = "fused_adam" @@ -18,12 +93,10 @@ def absolute_name(self): return f'deepspeed.ops.adam.{self.NAME}_op' def sources(self): - return ['csrc/cpu/adam/fused_adam.cpp', 'csrc/adam/cpu_adam_impl.cpp'] - - def cxx_args(self): - args = super().cxx_args() - args += ['-DENABLE_BFLOAT16'] - return args + return [] def include_paths(self): - return ['csrc/includes'] + return [] + + def load(self, verbose=True): + return HPUFusedAdam diff --git a/op_builder/hpu/transformer_inference.py b/op_builder/hpu/transformer_inference.py new file mode 100644 index 000000000000..e397c99200ec --- /dev/null +++ b/op_builder/hpu/transformer_inference.py @@ -0,0 +1,39 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 +import importlib + +# DeepSpeed Team + +try: + # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed + # if successful this also means we're doing a local install and not JIT compile path + from op_builder import __deepspeed__ # noqa: F401 + from op_builder.builder import OpBuilder +except ImportError: + from deepspeed.ops.op_builder.builder import OpBuilder + + +class InferenceBuilder(OpBuilder): + BUILD_VAR = "DS_BUILD_TRANSFORMER_INFERENCE" + NAME = "transformer_inference" + + def __init__(self, name=None): + name = self.NAME if name is None else name + super().__init__(name=self.NAME) + + def absolute_name(self): + return f"deepspeed.ops.transformer.inference.{self.NAME}_op" + + def sources(self): + return [] + + def load(self, verbose=True): + if self.name in __class__._loaded_ops: + return __class__._loaded_ops[self.name] + + from deepspeed.git_version_info import installed_ops # noqa: F401 + if installed_ops.get(self.name, False): + op_module = importlib.import_module(self.absolute_name()) + __class__._loaded_ops[self.name] = op_module + return op_module diff --git a/pre-commit-toggle.sh b/pre-commit-toggle.sh new file mode 100755 index 000000000000..c458c7f2d0da --- /dev/null +++ b/pre-commit-toggle.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +# Path to the pre-commit configuration file +PRE_COMMIT_CONFIG=".pre-commit-config.yaml" + +# Install pre-commit +pip install -r ${DEEPSPEED_FORK_ROOT}/requirements/requirements-dev.txt + +# Enable pre-commit +function enable_pre_commit() { + # Install pre-commit hooks + pre-commit install +} + +# Disable pre-commit +function disable_pre_commit() { + # Install pre-commit hooks + pre-commit uninstall +} + +# Check if the pre-commit configuration file exists +if [ -f "$PRE_COMMIT_CONFIG" ]; then + echo "Pre-commit configuration file found: $PRE_COMMIT_CONFIG" +else + echo "Pre-commit configuration file not found: $PRE_COMMIT_CONFIG" + exit 1 +fi + +# Check the command-line argument to enable or disable pre-commit +if 
[ "$1" == "enable" ]; then + enable_pre_commit +elif [ "$1" == "disable" ]; then + disable_pre_commit +else + echo "Usage: ./pre-commit-toggle.sh [enable|disable]" + exit 1 +fi diff --git a/requirements/requirements-sparse_attn.txt b/requirements/requirements-sparse_attn.txt index f929bb0168a5..09386fdcb120 100755 --- a/requirements/requirements-sparse_attn.txt +++ b/requirements/requirements-sparse_attn.txt @@ -1 +1 @@ -triton==1.0.0 +triton==2.0.0.dev20221202 diff --git a/test b/test new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/conftest.py b/tests/conftest.py index 45e8434a021b..8c38ac65b52a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -11,6 +11,13 @@ from os.path import abspath, dirname, join import torch import warnings +from unit.ci_promote_marker import * +from unit.xfail_marker import * +from unit.skip_marker import * +from unit.compile_marker import * +from unit.util import get_hpu_dev_version +from deepspeed.accelerator import get_accelerator +from unit.util import hpu_lazy_enabled # Set this environment variable for the T5 inference unittest(s) (e.g. google/t5-v1_1-small) os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python' @@ -70,6 +77,74 @@ def pytest_runtest_call(item): item.runtest = lambda: True # Dummy function so test is not run twice +def pytest_collection_modifyitems(items, config): + device = get_accelerator().device_name() + gaudi_dev = get_hpu_dev_version() + hpu_lazy_mode = hpu_lazy_enabled() + # Add comipile, CI and Promote marker + for item in items: + if device == 'hpu': + if item._nodeid in compile_tests: + item._pyfuncitem.add_marker(pytest.mark.compile) + if item._nodeid in hpu_ci_tests: + item._pyfuncitem.add_marker(pytest.mark.hpu_ci) + if item._nodeid in hpu_ci_tests_4cards: + item._pyfuncitem.add_marker(pytest.mark.hpu_ci_4cards) + if item._nodeid in gpu_ci_tests: + item._pyfuncitem.add_marker(pytest.mark.gpu_ci) + if item._nodeid in hpu_promote_tests: + item._pyfuncitem.add_marker(pytest.mark.hpu_promote) + if item._nodeid in hpu_promote_tests_4cards: + item._pyfuncitem.add_marker(pytest.mark.hpu_promote_4cards) + if item._nodeid in gpu_promote_tests: + item._pyfuncitem.add_marker(pytest.mark.gpu_promote) + + # Add xfail and SKIP marker + item.user_properties.append(("module_name", item.module.__name__)) + if device == 'hpu': + # Lazy Run + if hpu_lazy_mode: + if item._nodeid in hpu_lazy_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=hpu_lazy_xfail_tests[item._nodeid])) + if item._nodeid in hpu_lazy_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skipif(reason=hpu_lazy_skip_tests[item._nodeid])) + if gaudi_dev == "Gaudi": + if item._nodeid in g1_lazy_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=g1_lazy_xfail_tests[item._nodeid])) + if item._nodeid in g1_lazy_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skip(reason=g1_lazy_skip_tests[item._nodeid])) + if gaudi_dev == "Gaudi2": + if item._nodeid in g2_lazy_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=g2_lazy_xfail_tests[item._nodeid])) + if item._nodeid in g2_lazy_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skip(reason=g2_lazy_skip_tests[item._nodeid])) + # Eager Run + else: + if item._nodeid in hpu_eager_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=hpu_eager_xfail_tests[item._nodeid])) + if item._nodeid in hpu_eager_skip_tests.keys(): + 
item._pyfuncitem.add_marker(pytest.mark.skipif(reason=hpu_eager_skip_tests[item._nodeid])) + if gaudi_dev == "Gaudi": + if item._nodeid in g1_eager_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=g1_eager_xfail_tests[item._nodeid])) + if item._nodeid in g1_eager_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skip(reason=g1_eager_skip_tests[item._nodeid])) + if gaudi_dev == "Gaudi2": + if item._nodeid in g2_eager_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=g2_eager_xfail_tests[item._nodeid])) + if item._nodeid in g2_eager_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skip(reason=g2_eager_skip_tests[item._nodeid])) + else: + if item._nodeid in gpu_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=gpu_xfail_tests[item._nodeid])) + if item._nodeid in gpu_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skipif(reason=gpu_skip_tests[item._nodeid])) + for marker in item.own_markers: + if marker.name in ['skip', 'xfail']: + if 'reason' in marker.kwargs: + item.user_properties.append(("message", marker.kwargs['reason'])) + + # We allow DistributedTest to reuse distributed environments. When the last # test for a class is run, we want to make sure those distributed environments # are destroyed. @@ -85,3 +160,11 @@ def pytest_fixture_setup(fixturedef, request): if getattr(fixturedef.func, "is_dist_fixture", False): dist_fixture_class = fixturedef.func() dist_fixture_class(request) + + +def pytest_runtest_makereport(item, call): + if call.when == 'call': + if call.excinfo: + if not (any('message' in prop for prop in item.user_properties)): + if call.excinfo.value: + item.user_properties.append(("message", call.excinfo.value)) diff --git a/tests/pytest.ini b/tests/pytest.ini index f841c47afc0c..c1b48439ab54 100644 --- a/tests/pytest.ini +++ b/tests/pytest.ini @@ -1,5 +1,5 @@ [pytest] -addopts = -m "not sequential and not nightly and not inference and not seq_inference and not inference_ops and not inference_v2 and not inference_v2_ops and not stable_diffusion and not evaluation" +addopts = -m "not sequential and not nightly and not inference and not seq_inference and not inference_ops and not inference_v2 and not inference_v2_ops and not stable_diffusion and not evaluation and not compile" markers = sequential:Tests that need to be run sequentially inference:Inference model tests @@ -11,3 +11,4 @@ markers = world_size:Change world size of individual tests in a class stable_diffusion:Tests that run Stable Diffusion evaluation:Tests that evaluate model correctness + compile: torch.compile tests diff --git a/tests/unit/alexnet_model.py b/tests/unit/alexnet_model.py index cf533063d6ec..ba73ded571c0 100644 --- a/tests/unit/alexnet_model.py +++ b/tests/unit/alexnet_model.py @@ -100,12 +100,24 @@ def cifar_trainset(fp16=False): dist.barrier() if local_rank != 0: dist.barrier() - - data_root = os.getenv("TEST_DATA_DIR", "/tmp/") - trainset = torchvision.datasets.CIFAR10(root=os.path.join(data_root, "cifar10-data"), - train=True, - download=True, - transform=transform) + if os.getenv("CIFAR10_OFFLINE", default=None): + if os.getenv("CIFAR10_DATASET_PATH", default=None): + trainset = torchvision.datasets.CIFAR10(root=os.getenv("CIFAR10_DATASET_PATH", default=None), + train=True, + download=False, + transform=transform) + elif os.getenv("STORE_CIFAR10", default=None): + if os.getenv("CIFAR10_DATASET_PATH", default=None): + trainset = 
torchvision.datasets.CIFAR10(root=os.getenv("CIFAR10_DATASET_PATH", default=None), + train=True, + download=True, + transform=transform) + else: + data_root = os.getenv("TEST_DATA_DIR", "/tmp/") + trainset = torchvision.datasets.CIFAR10(root=os.path.join(data_root, "cifar10-data"), + train=True, + download=True, + transform=transform) if local_rank == 0: dist.barrier() return trainset diff --git a/tests/unit/checkpoint/common.py b/tests/unit/checkpoint/common.py index 7442e51bad5d..2fb2cdfc6ec5 100644 --- a/tests/unit/checkpoint/common.py +++ b/tests/unit/checkpoint/common.py @@ -14,6 +14,7 @@ from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3 from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus +from unit.util import hpu_lazy_enabled from unit.simple_model import * from unittest.mock import MagicMock, patch @@ -85,15 +86,20 @@ def compare_model_states(saved_model, loaded_model, compare_optimizer=True, load def compare_state_dicts(state0, state1, expected_mismatch_keys=[]): - for (k0, s0), (k1, s1) in zip(state0.items(), state1.items()): - assert k0 == k1, f'failure due to key mismatch {k0} != {k1}' - if k0 in expected_mismatch_keys: + key_set0 = set(k for k in state0.keys() if k not in expected_mismatch_keys) + key_set1 = set(k for k in state1.keys() if k not in expected_mismatch_keys) + assert key_set0 == key_set1, f'failure due to key mismatch {key_set0} != {key_set1}' + + for k in key_set0: + s0 = state0[k] + s1 = state1[k] + if k in expected_mismatch_keys: continue if isinstance(s0, torch.Tensor) and isinstance(s1, torch.Tensor): assert id(s0) != id(s1), f'Comparing optimizer state tensor against itself: {id(s0)} <====> {id(s1)}' assert torch.equal(s0.to('cpu'), s1.to('cpu')) else: - assert s0 == s1, f'failures with keys = {k0}, {k1}, values = {type(s0[0])} and {type(s1[0])}' + assert s0 == s1, f'failures with keys = {k}, {k}, values = {s0} and {s1}' def compare_opt_state_dicts(state0, state1, expected_mismatch_keys=[]): @@ -149,6 +155,8 @@ def create_moe_param_groups(model): def create_deepspeed_model(config_dict, model, base_optimizer): + if hpu_lazy_enabled(): + model.to(get_accelerator().device_name()) ds_model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=create_moe_param_groups(model), diff --git a/tests/unit/checkpoint/test_latest_checkpoint.py b/tests/unit/checkpoint/test_latest_checkpoint.py index 41ce2278680f..9d2147ca1519 100644 --- a/tests/unit/checkpoint/test_latest_checkpoint.py +++ b/tests/unit/checkpoint/test_latest_checkpoint.py @@ -11,6 +11,7 @@ from unit.checkpoint.common import checkpoint_correctness_verification from deepspeed.ops.op_builder import FusedAdamBuilder +from deepspeed.accelerator import get_accelerator if not deepspeed.ops.__compatible_ops__[FusedAdamBuilder.NAME]: pytest.skip("This op had not been implemented on this system.", allow_module_level=True) @@ -19,7 +20,8 @@ class TestLatestCheckpoint(DistributedTest): world_size = 1 - def test_existing_latest(self, tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_existing_latest(self, tmpdir, compile_mode): config_dict = { "train_batch_size": 2, "steps_per_print": 1, @@ -28,6 +30,10 @@ def test_existing_latest(self, tmpdir): "params": { "lr": 0.00015 } + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } hidden_dim = 10 @@ -41,7 +47,8 @@ def test_existing_latest(self, tmpdir): fp16=False, empty_tag=True) - def test_missing_latest(self, tmpdir): + 
@pytest.mark.parametrize('compile_mode', [True, False]) + def test_missing_latest(self, tmpdir, compile_mode): config_dict = { "train_batch_size": 2, "steps_per_print": 1, @@ -50,6 +57,10 @@ def test_missing_latest(self, tmpdir): "params": { "lr": 0.00015 } + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } hidden_dim = 10 diff --git a/tests/unit/checkpoint/test_lr_scheduler.py b/tests/unit/checkpoint/test_lr_scheduler.py index c4c6773cd474..929539215660 100644 --- a/tests/unit/checkpoint/test_lr_scheduler.py +++ b/tests/unit/checkpoint/test_lr_scheduler.py @@ -8,18 +8,18 @@ from unit.common import DistributedTest from unit.simple_model import * - from unit.checkpoint.common import checkpoint_correctness_verification - +from deepspeed.accelerator import get_accelerator import pytest +@pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage, use_cpu_offload', [(0, False), (1, False), (2, False), (2, True), (3, False), (3, True)]) class TestLRSchedulerCheckpoint(DistributedTest): world_size = 2 - def test_checkpoint_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload): + def test_checkpoint_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload, compile_mode): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") @@ -49,14 +49,20 @@ def test_checkpoint_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload): "warmup_max_lr": 0.001, "warmup_num_steps": 1000 } + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } hidden_dim = 10 + fp16 = True + zero3_init_dtype = None if zero_stage == 3: global DeepSpeedZeroOptimizer_Stage3 from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3 - with deepspeed.zero.Init(): + with deepspeed.zero.Init(dtype=zero3_init_dtype): models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] else: models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] @@ -66,9 +72,10 @@ def test_checkpoint_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload): hidden_dim, tmpdir, load_optimizer_states=False, - load_lr_scheduler_states=True) + load_lr_scheduler_states=True, + fp16=fp16) - def test_checkpoint_no_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload): + def test_checkpoint_no_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload, compile_mode): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") @@ -96,11 +103,18 @@ def test_checkpoint_no_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload): "warmup_num_steps": 1000 } }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() + } } hidden_dim = 10 + fp16 = True + zero3_init_dtype = None + if zero_stage == 3: - with deepspeed.zero.Init(): + with deepspeed.zero.Init(dtype=zero3_init_dtype): models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] else: models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] @@ -110,4 +124,5 @@ def test_checkpoint_no_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload): hidden_dim, tmpdir, load_optimizer_states=False, - load_lr_scheduler_states=False) + load_lr_scheduler_states=False, + fp16=fp16) diff --git a/tests/unit/checkpoint/test_mics_optimizer.py b/tests/unit/checkpoint/test_mics_optimizer.py index 3f853cd5c13a..c7758fe07520 100644 --- a/tests/unit/checkpoint/test_mics_optimizer.py 
+++ b/tests/unit/checkpoint/test_mics_optimizer.py @@ -12,7 +12,6 @@ from unit.common import DistributedTest from unit.simple_model import * from unit.checkpoint.common import * - import pytest if not required_torch_version(max_version=2.0): @@ -46,7 +45,6 @@ def _toy_model_config(self, shard_size): "mics_shard_size": shard_size } } - hidden_dim = 10 with deepspeed.zero.MiCS_Init(config_dict_or_path=config_dict): models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] diff --git a/tests/unit/checkpoint/test_moe_checkpoint.py b/tests/unit/checkpoint/test_moe_checkpoint.py index 0706b7327ce8..2702dc106152 100644 --- a/tests/unit/checkpoint/test_moe_checkpoint.py +++ b/tests/unit/checkpoint/test_moe_checkpoint.py @@ -8,8 +8,9 @@ from unit.common import DistributedTest from unit.simple_model import * - +from deepspeed.accelerator import get_accelerator from unit.checkpoint.common import checkpoint_correctness_verification +from unit.util import hpu_lazy_enabled import pytest @@ -24,7 +25,7 @@ def test_checkpoint_moe(self, tmpdir, ep_size): config_dict = {"train_batch_size": 8, "steps_per_print": 1, "fp16": {"enabled": True}} hidden_dim = 16 - + fp16 = config_dict["fp16"]["enabled"] models = [SimpleMoEModel(hidden_dim=hidden_dim, num_experts=ep_size, ep_size=ep_size) for _ in range(2)] optimizers = [torch.optim.AdamW(params=model.parameters()) for model in models] checkpoint_correctness_verification(config_dict, @@ -33,13 +34,14 @@ def test_checkpoint_moe(self, tmpdir, ep_size): tmpdir=tmpdir, load_optimizer_states=True, load_lr_scheduler_states=False, - fp16=config_dict["fp16"]["enabled"], + fp16=fp16, empty_tag=True, base_optimizers=optimizers, seq_dataloader=True) + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize("ep_size, load_optim_states", [(4, True), (4, False), (2, True), (2, False)]) - def test_checkpoint_moe_and_zero(self, tmpdir, ep_size, load_optim_states): + def test_checkpoint_moe_and_zero(self, tmpdir, ep_size, load_optim_states, compile_mode): if not required_torch_version(min_version=1.8): pytest.skip("DeepSpeed MoE tests need torch 1.8 or higher to run correctly") @@ -61,11 +63,20 @@ def test_checkpoint_moe_and_zero(self, tmpdir, ep_size, load_optim_states): }, "zero_optimization": { "stage": 2, + "reduce_scatter": True + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } hidden_dim = 16 + fp16 = config_dict["fp16"]["enabled"] models = [SimpleMoEModel(hidden_dim=hidden_dim, num_experts=ep_size, ep_size=ep_size) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] # param group must have a random unique name (for now) # TODO: clean-up this requirement, the unique name should not be required here param_groups = [{'params': [p for p in model.parameters()], 'name': 'random-unique-name'} for model in models] @@ -77,7 +88,7 @@ def test_checkpoint_moe_and_zero(self, tmpdir, ep_size, load_optim_states): tmpdir=tmpdir, load_optimizer_states=load_optim_states, load_lr_scheduler_states=False, - fp16=config_dict["fp16"]["enabled"], + fp16=fp16, empty_tag=True, base_optimizers=optimizers, seq_dataloader=True) diff --git a/tests/unit/checkpoint/test_other_optimizer.py b/tests/unit/checkpoint/test_other_optimizer.py index 9cb8c4286880..50c76322278d 100644 --- a/tests/unit/checkpoint/test_other_optimizer.py +++ b/tests/unit/checkpoint/test_other_optimizer.py @@ -8,9 +8,7 @@ from unit.common import DistributedTest 
from unit.simple_model import * - from unit.checkpoint.common import checkpoint_correctness_verification - import pytest @@ -18,7 +16,8 @@ class TestOtherOptimizerCheckpoint(DistributedTest): world_size = 2 @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible") - def test_checkpoint_unfused_optimizer(self, tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_checkpoint_unfused_optimizer(self, tmpdir, compile_mode): config_dict = { "train_batch_size": 2, "steps_per_print": 1, @@ -47,9 +46,13 @@ def test_checkpoint_unfused_optimizer(self, tmpdir): "cycle_max_mom": 0.99, "decay_mom_rate": 0.0 } + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } - + fp16 = True args = args_from_dict(tmpdir, config_dict) hidden_dim = 10 models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] @@ -59,16 +62,19 @@ def test_checkpoint_unfused_optimizer(self, tmpdir): models=models, hidden_dim=hidden_dim, tmpdir=tmpdir, - load_optimizer_states=True) + load_optimizer_states=True, + fp16=fp16) # Ignore optimizer states checkpoint_correctness_verification(config_dict, models=models, hidden_dim=hidden_dim, tmpdir=tmpdir, - load_optimizer_states=False) + load_optimizer_states=False, + fp16=fp16) - def test_checkpoint_fused_optimizer(self, tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_checkpoint_fused_optimizer(self, tmpdir, compile_mode): config_dict = { "train_batch_size": 2, "steps_per_print": 1, @@ -83,8 +89,13 @@ def test_checkpoint_fused_optimizer(self, tmpdir): }, "fp16": { "enabled": True + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } + fp16 = True args = args_from_dict(tmpdir, config_dict) hidden_dim = 10 @@ -95,16 +106,19 @@ def test_checkpoint_fused_optimizer(self, tmpdir): models=models, hidden_dim=hidden_dim, tmpdir=tmpdir, - load_optimizer_states=True) + load_optimizer_states=True, + fp16=fp16) # Ignore optimizer states checkpoint_correctness_verification(config_dict, models=models, hidden_dim=hidden_dim, tmpdir=tmpdir, - load_optimizer_states=False) + load_optimizer_states=False, + fp16=fp16) - def test_checkpoint_fp32_optimizer(self, tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_checkpoint_fp32_optimizer(self, tmpdir, compile_mode): config_dict = { "train_batch_size": 2, "steps_per_print": 1, @@ -119,6 +133,10 @@ def test_checkpoint_fp32_optimizer(self, tmpdir): }, "fp16": { "enabled": False + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } diff --git a/tests/unit/checkpoint/test_pipeline.py b/tests/unit/checkpoint/test_pipeline.py index 99f1ba2ec433..e199b8a45d71 100644 --- a/tests/unit/checkpoint/test_pipeline.py +++ b/tests/unit/checkpoint/test_pipeline.py @@ -8,15 +8,15 @@ from unit.simple_model import * from unit.checkpoint.common import checkpoint_correctness_verification from unit.util import skip_on_arch - import pytest class TestPipelineCheckpoint(DistributedTest): world_size = 4 + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize("zero_stage", [0, 1]) - def test_checkpoint_pipe_engine(self, zero_stage, tmpdir): + def test_checkpoint_pipe_engine(self, zero_stage, compile_mode, tmpdir): skip_on_arch(min_arch=7) config_dict = { @@ -50,15 +50,20 @@ def test_checkpoint_pipe_engine(self, zero_stage, tmpdir): "cycle_max_mom": 0.99, "decay_mom_rate": 
0.0 } + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } + fp16 = config_dict['fp16']['enabled'] models = [LinearStackPipe(num_stages=2) for _ in range(2)] checkpoint_correctness_verification(config_dict=config_dict, models=models, hidden_dim=models[0].hidden_dim, tmpdir=tmpdir, - fp16=config_dict['fp16']['enabled'], + fp16=fp16, load_optimizer_states=True, load_lr_scheduler_states=True, train_batch=True) diff --git a/tests/unit/checkpoint/test_shared_weights.py b/tests/unit/checkpoint/test_shared_weights.py index ed69073fb81c..6d6e8d37a502 100644 --- a/tests/unit/checkpoint/test_shared_weights.py +++ b/tests/unit/checkpoint/test_shared_weights.py @@ -7,8 +7,11 @@ import torch.nn as nn import deepspeed +import pytest from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint from unit.common import DistributedTest +from unit.util import hpu_lazy_enabled +from deepspeed.accelerator import get_accelerator class ModelWithSharedWeights(nn.Module): @@ -25,17 +28,24 @@ def __init__(self): class TestCheckpointSharedWeights(DistributedTest): world_size = 2 - def test_checkpoint_shared_weights(self, tmp_path): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_checkpoint_shared_weights(self, tmp_path, compile_mode): config = { "train_micro_batch_size_per_gpu": 2, "zero_allow_untested_optimizer": True, "zero_optimization": { "stage": 2 }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() + } } model = ModelWithSharedWeights() + if hpu_lazy_enabled(): + device = get_accelerator().current_device_name() + model.to(device) optimizer = torch.optim.Adam(model.parameters()) - deepspeed_engine, _, _, _ = deepspeed.initialize( config=config, model=model, diff --git a/tests/unit/checkpoint/test_tag_validation.py b/tests/unit/checkpoint/test_tag_validation.py index b164c31e52b0..b61dcc2f2f8a 100644 --- a/tests/unit/checkpoint/test_tag_validation.py +++ b/tests/unit/checkpoint/test_tag_validation.py @@ -7,15 +7,16 @@ from unit.common import DistributedTest from unit.simple_model import * - +from deepspeed.accelerator import get_accelerator import pytest class TestCheckpointValidationTag(DistributedTest): world_size = 2 + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('valid_mode', ["FAIL", "WARN", "IGNORE"]) - def test_checkpoint_unique_tag(self, tmpdir, valid_mode): + def test_checkpoint_unique_tag(self, tmpdir, valid_mode, compile_mode): config_dict = { "train_batch_size": 2, "steps_per_print": 1, @@ -27,6 +28,10 @@ def test_checkpoint_unique_tag(self, tmpdir, valid_mode): }, "checkpoint": { "tag_validation": valid_mode + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } hidden_dim = 10 @@ -39,7 +44,8 @@ def test_checkpoint_unique_tag(self, tmpdir, valid_mode): else: model.save_checkpoint(save_dir=tmpdir, tag=f"tag-{dist.get_rank()}") - def test_checkpoint_unknown_tag_validation(self, tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_checkpoint_unknown_tag_validation(self, tmpdir, compile_mode): config_dict = { "train_batch_size": 2, @@ -52,6 +58,10 @@ def test_checkpoint_unknown_tag_validation(self, tmpdir): }, "checkpoint": { "tag_validation": "foo" + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } hidden_dim = 10 diff --git a/tests/unit/checkpoint/test_universal_checkpoint.py 
b/tests/unit/checkpoint/test_universal_checkpoint.py new file mode 100644 index 000000000000..7adfe8410b55 --- /dev/null +++ b/tests/unit/checkpoint/test_universal_checkpoint.py @@ -0,0 +1,215 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import deepspeed +from types import SimpleNamespace +from torch.utils._pytree import tree_map + +from deepspeed.runtime.utils import required_torch_version +from deepspeed.checkpoint import UNIVERSAL_CHECKPOINT_INFO +from deepspeed.checkpoint.ds_to_universal import main as convert_to_universal + +from unit.common import DistributedTest, DistributedFixture +from unit.simple_model import * +from unit.util import bf16_required_version_check + +from unit.checkpoint.common import compare_opt_state_dicts, compare_state_dicts + +import pytest +import deepspeed.comm as dist + + +def get_expected_mismatch_keys(): + # torch 1.2.* stores raw tensor id numbers in checkpoint state which leads to + # false positive mismatches in checkpoint state comparisons. + # Newer torch versions store tensor ids as 0, 1, 2, ... + return [] if required_torch_version(min_version=1.4) else ['params'] + + +def maybe_step(t): + return not torch.is_tensor(t) or (t.device.type == 'cpu' and t.numel() == 1) + + +def gather_opt_state(optimizer_state): + + def gather_tensor(t): + + if maybe_step(t): + return t + else: + buffer = [torch.zeros_like(t.flatten()) for _ in range(dist.get_world_size())] + dist.all_gather(buffer, t.flatten()) + return torch.cat(buffer) + + return tree_map(gather_tensor, optimizer_state) + + +def remove_pad_in_opt_state(optimizer_state, num_params): + + def remove_pad(t): + if maybe_step(t): + return t + else: + return t[:num_params] + + return tree_map(remove_pad, optimizer_state) + + +CP_TAG = "test_tag" + + +def init_ds_engine(model, ds_config, use_torch_adam): + + if use_torch_adam: + ds_optimizer = torch.optim.Adam(model.parameters(), lr=0.1) + del ds_config["optimizer"] + model, _, _, _ = deepspeed.initialize(config=ds_config, model=model, optimizer=ds_optimizer) + else: + model, _, _, _ = deepspeed.initialize(config=ds_config, model=model, model_parameters=model.parameters()) + + return model + + +def train_save_convert(ds_config, hidden_dim, load_optim, use_torch_adam, dtype, tmpdir): + if dtype == torch.bfloat16 and not bf16_required_version_check(): + return + + test_step = 8 + + model = SimpleModel(hidden_dim) + model = init_ds_engine(model, ds_config, use_torch_adam) + data_loader = random_dataloader(model=model, + total_samples=test_step, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) + for batch in data_loader: + loss = model(batch[0], batch[1]) + model.backward(loss) + model.step() + + sd = model.optimizer.optimizer.state_dict() if load_optim else None + + client_state = {} + client_state[UNIVERSAL_CHECKPOINT_INFO] = {} + client_state['iteration'] = test_step + model.save_checkpoint(tmpdir, tag=CP_TAG, client_state=client_state) + + cp_dir = os.path.join(tmpdir, CP_TAG) + univ_cp_dir = f"{cp_dir}_universal" + + args = SimpleNamespace(input_folder=cp_dir, + output_folder=univ_cp_dir, + num_extract_workers=1, + num_merge_workers=1, + keep_temp_folder=False, + strict=True) + + dist.barrier() + if dist.get_rank() == 0: + convert_to_universal(args) + + model_state = model.state_dict() + optimizer_state = None + if load_optim: + optimizer_state = gather_opt_state(model.optimizer.optimizer.state_dict()) + + if dist.get_rank() == 0: + torch.save((model_state, optimizer_state), 
os.path.join(tmpdir, "baseline_state.pt")) + + dist.barrier() + + return model, sd + + +@pytest.fixture +def ds_config(zero_stage, dtype): + ds_config = { + "train_batch_size": 8, + "optimizer": { + "type": 'Adam' + }, + "zero_optimization": { + "stage": zero_stage, + } + } + if dtype == torch.float16: + ds_config["fp16"] = {"enabled": True, "initial_scale_power": 8} + elif dtype == torch.bfloat16: + ds_config["bf16"] = {"enabled": True} + return ds_config + + +class _baseline(DistributedFixture): + world_size = None + + def run(self, tmpdir, ds_config, zero_stage, dtype, load_optim, use_torch_adam): + hidden_dim = 10 + train_save_convert(ds_config, hidden_dim, load_optim, use_torch_adam, dtype, tmpdir) + + +class baseline_ws2(_baseline): + world_size = 2 + + +class baseline_ws4(_baseline): + world_size = 4 + + +@pytest.mark.parametrize('dtype', [torch.bfloat16, torch.float16, torch.float32]) +@pytest.mark.parametrize("zero_stage", [1]) +@pytest.mark.parametrize("use_torch_adam", [False, True]) +@pytest.mark.parametrize("load_optim", [False, True]) +class TestZeROUniversalCheckpointDP(DistributedTest): + + def _run_test(self, tmpdir, dtype, ds_config, load_optim, use_torch_adam): + if dtype == torch.bfloat16 and not bf16_required_version_check(): + pytest.skip( + " DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly" + ) + + hidden_dim = 10 + loaded_model_state, loaded_optimizer_state = torch.load(f"{tmpdir}/baseline_state.pt") + + ds_config["checkpoint"] = {"load_universal": True} + univ_model = SimpleModel(hidden_dim) + univ_model = init_ds_engine(univ_model, ds_config, use_torch_adam) + univ_model.load_checkpoint(tmpdir, tag=f"{CP_TAG}_universal", load_optimizer_states=load_optim) + + model_state = univ_model.state_dict() + compare_state_dicts(model_state, loaded_model_state) + + if load_optim: + optimizer_state = gather_opt_state(univ_model.optimizer.optimizer.state_dict()) + # padding sizes may differ when dp sizes are different + param_count = sum(p.numel() for p in univ_model.parameters()) + optimizer_state = remove_pad_in_opt_state(optimizer_state, param_count) + loaded_optimizer_state = remove_pad_in_opt_state(loaded_optimizer_state, param_count) + + compare_opt_state_dicts(optimizer_state, loaded_optimizer_state, get_expected_mismatch_keys()) + + # Run training again to verify that the optimizer has necessary states + test_step = 8 + data_loader = random_dataloader(model=univ_model, + total_samples=test_step, + hidden_dim=hidden_dim, + device=univ_model.device, + dtype=dtype) + for batch in data_loader: + loss = univ_model(batch[0], batch[1]) + univ_model.backward(loss) + univ_model.step() + + @pytest.mark.world_size(2) + def test_dp_world_size_2to2(self, baseline_ws2, tmpdir, dtype, ds_config, load_optim, use_torch_adam): + self._run_test(tmpdir, dtype, ds_config, load_optim, use_torch_adam) + + @pytest.mark.world_size(2) + def test_dp_world_size_4to2(self, baseline_ws4, tmpdir, dtype, ds_config, load_optim, use_torch_adam): + self._run_test(tmpdir, dtype, ds_config, load_optim, use_torch_adam) + + @pytest.mark.world_size(4) + def test_dp_world_size_2to4(self, baseline_ws2, tmpdir, dtype, ds_config, load_optim, use_torch_adam): + self._run_test(tmpdir, dtype, ds_config, load_optim, use_torch_adam) diff --git a/tests/unit/checkpoint/test_zero_optimizer.py b/tests/unit/checkpoint/test_zero_optimizer.py index 0b9efb3ec462..536a0d24fbd8 100644 --- a/tests/unit/checkpoint/test_zero_optimizer.py +++ 
b/tests/unit/checkpoint/test_zero_optimizer.py @@ -12,6 +12,7 @@ from unit.common import DistributedTest, DistributedFixture from unit.simple_model import * +from unit.util import hpu_lazy_enabled from unit.checkpoint.common import * @@ -21,8 +22,9 @@ class TestZeROCheckpoint(DistributedTest): world_size = 2 + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage', [3]) - def test_pipeline_checkpoint_loading(self, tmpdir, zero_stage): + def test_pipeline_checkpoint_loading(self, tmpdir, zero_stage, compile_mode): config_dict = { "train_batch_size": 2, "optimizer": { @@ -35,20 +37,24 @@ def test_pipeline_checkpoint_loading(self, tmpdir, zero_stage): "zero_optimization": { "stage": zero_stage, "pipeline_loading_checkpoint": True, + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } hidden_dim = 10 with deepspeed.zero.Init(): models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] - checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_module_only=True) + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage, use_cpu_offload, adam_optimizer', [(1, False, 'Adam'), (2, False, 'Adam'), (2, True, 'deepspeed_adam'), (3, False, 'Adam'), (3, True, 'deepspeed_adam')]) - def test_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_optimizer): + def test_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_optimizer, compile_mode): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") @@ -72,23 +78,38 @@ def test_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_op "zero_optimization": { "stage": zero_stage, "cpu_offload": use_cpu_offload + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } hidden_dim = 10 + fp16 = True + zero3_init_dtype = None if zero_stage == 3: - with deepspeed.zero.Init(): + with deepspeed.zero.Init(dtype=zero3_init_dtype): models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] else: models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] - checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=True) + checkpoint_correctness_verification(config_dict, + models, + hidden_dim, + tmpdir, + fp16=fp16, + load_optimizer_states=True) + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage, use_cpu_offload, adam_optimizer', [(1, False, "Adam"), (2, False, "Adam"), (2, True, 'deepspeed_adam'), (3, False, 'Adam'), (3, True, 'deepspeed_adam')]) - def test_not_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_optimizer): + def test_not_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_optimizer, compile_mode): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") @@ -110,22 +131,37 @@ def test_not_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, ada "zero_optimization": { "stage": zero_stage, "cpu_offload": use_cpu_offload + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } hidden_dim = 10 + fp16 = True + zero3_init_dtype = None if zero_stage == 3: global 
DeepSpeedZeroOptimizer_Stage3 from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3 - with deepspeed.zero.Init(): + with deepspeed.zero.Init(dtype=zero3_init_dtype): models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] else: models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] - checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=False) + checkpoint_correctness_verification(config_dict, + models, + hidden_dim, + tmpdir, + fp16=fp16, + load_optimizer_states=False) + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage', [1, 2]) - def test_hybrid_optimizer_state(self, tmpdir, zero_stage): + def test_hybrid_optimizer_state(self, tmpdir, zero_stage, compile_mode): config_dict = { "train_micro_batch_size_per_gpu": 2, "gradient_accumulation_steps": 2, @@ -137,10 +173,19 @@ def test_hybrid_optimizer_state(self, tmpdir, zero_stage): "fp16": { "enabled": True, "initial_scale_power": 8 + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } hidden_dim = 10 + fp16 = True + models = [SimpleModel(hidden_dim=hidden_dim) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] optimizers = [HybridStateOptimizer(model.parameters()) for model in models] checkpoint_correctness_verification(config_dict, @@ -148,10 +193,12 @@ def test_hybrid_optimizer_state(self, tmpdir, zero_stage): base_optimizers=optimizers, hidden_dim=hidden_dim, tmpdir=tmpdir, + fp16=fp16, load_optimizer_states=True) + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage', [0, 1, 2, 3]) - def test_load_module_only(self, tmpdir, zero_stage): + def test_load_module_only(self, tmpdir, zero_stage, compile_mode): config_dict = { "train_batch_size": 2, "optimizer": { @@ -163,23 +210,32 @@ def test_load_module_only(self, tmpdir, zero_stage): }, "zero_optimization": { "stage": zero_stage, + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } hidden_dim = 10 + fp16 = True + zero3_init_dtype = None if zero_stage == 3: - with deepspeed.zero.Init(): + with deepspeed.zero.Init(dtype=zero3_init_dtype): models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] else: models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] - checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_module_only=True) + checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, fp16=fp16, load_module_only=True) class ws4_model_checkpoint(DistributedFixture): world_size = 4 - def run(self, class_tmpdir, elastic_save, load_optim): + def run(self, class_tmpdir, elastic_save, load_optim, compile_mode): ds_config = { "train_batch_size": 4, "optimizer": { @@ -192,13 +248,22 @@ def run(self, class_tmpdir, elastic_save, load_optim): "zero_optimization": { "stage": 2, "elastic_checkpoint": elastic_save + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } hidden_dim = 10 model = SimpleModel(hidden_dim) + dtype = torch.half model, _, _, _ = deepspeed.initialize(config=ds_config, 
model=model, model_parameters=model.parameters()) - data_loader = random_dataloader(model=model, total_samples=8, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=8, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) @@ -209,13 +274,14 @@ def run(self, class_tmpdir, elastic_save, load_optim): model.save_checkpoint(class_tmpdir) +@pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize("elastic_save", [True, False]) @pytest.mark.parametrize("elastic_load", [True, False]) @pytest.mark.parametrize("load_optim", [True, False]) class TestZeROElasticCheckpoint(DistributedTest): world_size = 2 - def test_elastic_checkpoint_fixed_dp(self, tmpdir, elastic_save, elastic_load, load_optim): + def test_elastic_checkpoint_fixed_dp(self, tmpdir, elastic_save, elastic_load, load_optim, compile_mode): ds_config = { "train_batch_size": 2, "optimizer": { @@ -228,6 +294,10 @@ def test_elastic_checkpoint_fixed_dp(self, tmpdir, elastic_save, elastic_load, l "zero_optimization": { "stage": 2, "elastic_checkpoint": elastic_save + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } hidden_dim = 10 @@ -237,10 +307,13 @@ def test_elastic_checkpoint_fixed_dp(self, tmpdir, elastic_save, elastic_load, l # Newer torch versions store tensor ids as 0, 1, 2, ... expected_mismatch_keys = [] if required_torch_version(min_version=1.4) else ['params'] models = [SimpleModel(hidden_dim) for _ in range(2)] + + dtype = torch.half model, _, _, _ = deepspeed.initialize(config=ds_config, model=models[0], model_parameters=models[0].parameters()) - data_loader = random_dataloader(model=model, total_samples=8, hidden_dim=hidden_dim, device=model.device) + run_steps = 8 + data_loader = random_dataloader(model=model, total_samples=run_steps, hidden_dim=hidden_dim, device=model.device, dtype=dtype) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) @@ -261,14 +334,18 @@ def test_elastic_checkpoint_fixed_dp(self, tmpdir, elastic_save, elastic_load, l curr_sd = model.optimizer.optimizer.state_dict() compare_opt_state_dicts(curr_sd, saved_sd, expected_mismatch_keys) - data_loader = random_dataloader(model=model, total_samples=8, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=8, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() def test_elastic_checkpoint_change_dp(self, ws4_model_checkpoint, class_tmpdir, elastic_save, elastic_load, - load_optim): + load_optim, compile_mode): ds_config = { "train_batch_size": 4, "optimizer": { @@ -281,6 +358,10 @@ def test_elastic_checkpoint_change_dp(self, ws4_model_checkpoint, class_tmpdir, "zero_optimization": { "stage": 2, "elastic_checkpoint": elastic_load + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } hidden_dim = 10 @@ -295,11 +376,12 @@ def test_elastic_checkpoint_change_dp(self, ws4_model_checkpoint, class_tmpdir, model.load_checkpoint(class_tmpdir, load_optimizer_states=load_optim) +@pytest.mark.parametrize('compile_mode', [True, False]) class TestZeROSaveLoadEdgeCase(DistributedTest): world_size = 2 @pytest.mark.parametrize('zero_stage', [0, 1, 2, 3]) - def test_immediate_save_load(self, tmpdir, zero_stage): + def
test_immediate_save_load(self, tmpdir, zero_stage, compile_mode): config_dict = { "train_batch_size": 4, "optimizer": { @@ -311,6 +393,10 @@ def test_immediate_save_load(self, tmpdir, zero_stage): }, "zero_optimization": { "stage": zero_stage, + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } hidden_dim = 10 @@ -324,7 +410,7 @@ def test_immediate_save_load(self, tmpdir, zero_stage): load_module_only=False) @pytest.mark.parametrize('zero_stage', [0, 1, 2, 3]) - def test_load_immediate_save(self, tmpdir, zero_stage): + def test_load_immediate_save(self, tmpdir, zero_stage, compile_mode): config_dict = { "train_batch_size": 4, "optimizer": { @@ -336,6 +422,10 @@ def test_load_immediate_save(self, tmpdir, zero_stage): }, "zero_optimization": { "stage": zero_stage, + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } hidden_dim = 10 @@ -366,7 +456,7 @@ def test_load_immediate_save(self, tmpdir, zero_stage): ds_model.save_checkpoint(tmpdir) @pytest.mark.parametrize('zero_stage', [0, 1, 2, 3]) - def test_save_before_accum_grad_is_done(self, tmpdir, zero_stage): + def test_save_before_accum_grad_is_done(self, tmpdir, zero_stage, compile_mode): config_dict = { "optimizer": { "type": 'Adam' @@ -379,6 +469,10 @@ def test_save_before_accum_grad_is_done(self, tmpdir, zero_stage): "stage": zero_stage, "stage3_gather_fp16_weights_on_model_save": True, }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() + }, "gradient_accumulation_steps": 2, "train_micro_batch_size_per_gpu": 1, "train_batch_size": 4, @@ -386,6 +480,7 @@ def test_save_before_accum_grad_is_done(self, tmpdir, zero_stage): hidden_dim = 10 model = SimpleModel(hidden_dim) + dtype = torch.half # This test reproduces a bug where one tries to retrieve a 16bit model before grad_accum # cycle was completed. 
# So we config grad_accum=2 and step only once and save_16bit_model @@ -395,7 +490,7 @@ def test_save_before_accum_grad_is_done(self, tmpdir, zero_stage): total_samples=2, hidden_dim=hidden_dim, device=ds_model.device, - dtype=torch.half) + dtype=dtype) batch = next(iter(data_loader)) loss = ds_model(batch[0], batch[1]) @@ -411,11 +506,12 @@ def test_save_before_accum_grad_is_done(self, tmpdir, zero_stage): ds_model.save_checkpoint(tmpdir) +@pytest.mark.parametrize('compile_mode', [True, False]) class TestZeROCheckpointFrozenWeights(DistributedTest): world_size = 2 @pytest.mark.parametrize('zero_stage', [1, 2, 3]) - def test_load_optimizer_state(self, tmpdir, zero_stage): + def test_load_optimizer_state(self, tmpdir, zero_stage, compile_mode): config_dict = { "train_batch_size": 2, @@ -436,17 +532,32 @@ def test_load_optimizer_state(self, tmpdir, zero_stage): "wall_clock_breakdown": True, "zero_optimization": { "stage": zero_stage + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } + fp16 = True + dtype = None + hidden_dim = 10 - with deepspeed.zero.Init(enabled=zero_stage == 3): + with deepspeed.zero.Init(enabled=zero_stage == 3, dtype=dtype): models = [SimpleFrozenModel(hidden_dim, empty_grad=False) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] - checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=True) + checkpoint_correctness_verification(config_dict, + models, + hidden_dim, + tmpdir, + load_optimizer_states=True, + fp16=fp16) @pytest.mark.parametrize('zero_stage', [1, 2, 3]) - def test_not_load_optimizer_state(self, tmpdir, zero_stage): + def test_not_load_optimizer_state(self, tmpdir, zero_stage, compile_mode): config_dict = { "train_batch_size": 2, @@ -465,17 +576,30 @@ def test_not_load_optimizer_state(self, tmpdir, zero_stage): }, "zero_optimization": { "stage": zero_stage + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } hidden_dim = 10 - - with deepspeed.zero.Init(enabled=zero_stage == 3): + fp16 = True + dtype = None + with deepspeed.zero.Init(enabled=zero_stage == 3, dtype=dtype): models = [SimpleFrozenModel(hidden_dim, empty_grad=False) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] - checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=False) + checkpoint_correctness_verification(config_dict, + models, + hidden_dim, + tmpdir, + load_optimizer_states=False, + fp16=fp16) @pytest.mark.parametrize('zero_stage', [1, 2, 3]) - def test_load_module_only(self, tmpdir, zero_stage): + def test_load_module_only(self, tmpdir, zero_stage, compile_mode): config_dict = { "train_batch_size": 2, "optimizer": { @@ -487,17 +611,25 @@ def test_load_module_only(self, tmpdir, zero_stage): }, "zero_optimization": { "stage": zero_stage, + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } hidden_dim = 10 - - with deepspeed.zero.Init(enabled=zero_stage == 3): + fp16 = True + dtype = None + with deepspeed.zero.Init(enabled=zero_stage == 3, dtype=dtype): models = [SimpleFrozenModel(hidden_dim, empty_grad=False) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] - 
checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_module_only=True) + checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_module_only=True, fp16=fp16) @pytest.mark.parametrize('zero_stage', [1, 2]) - def test_save_exclude_frozen_weights(self, tmpdir, zero_stage): + def test_save_exclude_frozen_weights(self, tmpdir, zero_stage, compile_mode): world_size = 1 config_dict = { "train_micro_batch_size_per_gpu": 1, @@ -510,6 +642,10 @@ def test_save_exclude_frozen_weights(self, tmpdir, zero_stage): }, "zero_optimization": { "stage": zero_stage, + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } hidden_dim = 10 @@ -545,7 +681,7 @@ def test_save_exclude_frozen_weights(self, tmpdir, zero_stage): assert loaded_trainable_param_names == trainable_param_names @pytest.mark.parametrize('zero_stage', [1, 2]) - def test_save_exclude_custom_frozen_weights(self, tmpdir, zero_stage): + def test_save_exclude_custom_frozen_weights(self, tmpdir, zero_stage, compile_mode): world_size = 1 config_dict = { "train_micro_batch_size_per_gpu": 1, @@ -558,6 +694,10 @@ def test_save_exclude_custom_frozen_weights(self, tmpdir, zero_stage): }, "zero_optimization": { "stage": zero_stage, + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } hidden_dim = 10 @@ -589,9 +729,10 @@ def test_save_exclude_custom_frozen_weights(self, tmpdir, zero_stage): class TestSaveTensorClone(DistributedTest): world_size = 1 + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage', [1, 2]) @pytest.mark.parametrize('use_cpu_device', [True, False]) - def test_save_tensor_clone(self, tmpdir, zero_stage, use_cpu_device): + def test_save_tensor_clone(self, tmpdir, zero_stage, use_cpu_device, compile_mode): ds_config = { "optimizer": { @@ -600,11 +741,16 @@ def test_save_tensor_clone(self, tmpdir, zero_stage, use_cpu_device): "zero_optimization": { "stage": zero_stage }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() + }, "train_batch_size": 1, "train_micro_batch_size_per_gpu": 1 } + dtype = torch.float16 hidden_dim = 1024 - model = SimpleModel(hidden_dim, nlayers=4).half() + model = SimpleModel(hidden_dim, nlayers=4).to(dtype=dtype) ref_model_state_dict = model.state_dict() ds_engine, _, _, _ = deepspeed.initialize(model=model, config_params=ds_config) @@ -624,8 +770,9 @@ class TestZeRONonDistributed(DistributedTest): world_size = 1 init_distributed = False + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage', [1, 2, 3]) - def test_chmod_exception_handling(self, monkeypatch, zero_stage): + def test_chmod_exception_handling(self, monkeypatch, zero_stage, compile_mode): config_dict = { "optimizer": { @@ -634,6 +781,10 @@ def test_chmod_exception_handling(self, monkeypatch, zero_stage): "train_batch_size": 1, "zero_optimization": { "stage": zero_stage + }, + "compile": { + "enabled": compile_mode, + "backend": get_accelerator().get_compile_backend() } } args = SimpleNamespace(local_rank=0) diff --git a/tests/unit/ci_promote_marker.py b/tests/unit/ci_promote_marker.py new file mode 100644 index 000000000000..7bd83195723a --- /dev/null +++ b/tests/unit/ci_promote_marker.py @@ -0,0 +1,606 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +hpu_ci_tests = [ + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[None]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_train_schedule_singlestage", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_schedule_firststage", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[1]", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments", + "unit/launcher/test_ds_arguments.py::test_no_ds_enable_argument", + "unit/runtime/test_ds_config_model.py::test_config_base", + "unit/comm/test_dist.py::TestWorldSizeOverrideDistTest::test_world_size_1", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_211", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_122", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[2-2]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[33-33]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[0-0]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[1-1]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[32-32]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[5-5]", + "unit/runtime/half_precision/test_bf16.py::TestZeroAllowUntestedOptimizer::test", + "unit/runtime/test_ds_config_dict.py::TestInitNoOptimizer::test", + "unit/runtime/test_ds_config_dict.py::TestDistInit::test", "unit/launcher/test_run.py::test_parser_local", + "unit/launcher/test_run.py::test_parser_mutual_exclusive", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupLR-params0]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[OneCycle-params2]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[LRRangeTest-params3]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupDecayLR-params1]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-100]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0.1-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-211]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.1-0-10-0]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-210]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-101]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-100]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-101]", + "unit/runtime/utils/test_partition.py::test_float_balanced", + "unit/runtime/utils/test_partition.py::test_int_balanced", + 
"unit/runtime/utils/test_partition.py::test_easy_balance_uniform", + "unit/runtime/utils/test_partition.py::test_float_midheavy", + "unit/runtime/utils/test_partition.py::test_short_partition_uniform", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings2]", + "unit/autotuning/test_autotuning.py::test_command_line", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings4]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings3]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[None]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings1]", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/runtime/test_pld.py::TestNonPLDModel::test_non_pld_model", + "unit/runtime/zero/test_zero_config.py::test_zero_config_deprecatedfields", + "unit/runtime/zero/test_zero_config.py::test_zero_config_aliasfields", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_output[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_input[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensorOutputOrdering::test_ckpt_non_tensor_output_ordering[non_tensor_output3]", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[Optimizer]", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[optimizer_type2]", + "unit/elasticity/test_elastic.py::test_proper_mbsz", "unit/runtime/pipe/test_topology.py::test_topology_rank_repr", + "unit/runtime/pipe/test_topology.py::test_topology_2d", "unit/runtime/pipe/test_topology.py::test_primes", + "unit/runtime/sparse_tensor/test_csr.py::test_csr_addition_different", + "unit/utils/test_get_optim_files.py::test_get_optim_files[2]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[12]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[24]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[1]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[gpt2-text-generation-fp16-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[gpt2-text-generation-fp16-noCG]" +] + +hpu_ci_tests_4cards = [ + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[None]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_train_schedule_singlestage", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_schedule_firststage", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[1]", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments", + 
"unit/launcher/test_ds_arguments.py::test_no_ds_enable_argument", + "unit/runtime/test_ds_config_model.py::test_config_base", + "unit/comm/test_dist.py::TestWorldSizeOverrideDistTest::test_world_size_1", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_211", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_122", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[2-2]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[33-33]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[0-0]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[1-1]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[32-32]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[5-5]", + "unit/runtime/half_precision/test_bf16.py::TestZeroAllowUntestedOptimizer::test", + "unit/runtime/test_ds_config_dict.py::TestInitNoOptimizer::test", + "unit/runtime/test_ds_config_dict.py::TestDistInit::test", "unit/launcher/test_run.py::test_parser_local", + "unit/launcher/test_run.py::test_parser_mutual_exclusive", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupLR-params0]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[OneCycle-params2]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[LRRangeTest-params3]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupDecayLR-params1]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-100]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0.1-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-211]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.1-0-10-0]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-210]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-101]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-100]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-101]", + "unit/runtime/utils/test_partition.py::test_float_balanced", + "unit/runtime/utils/test_partition.py::test_int_balanced", + "unit/runtime/utils/test_partition.py::test_easy_balance_uniform", + "unit/runtime/utils/test_partition.py::test_float_midheavy", + "unit/runtime/utils/test_partition.py::test_short_partition_uniform", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings2]", + "unit/autotuning/test_autotuning.py::test_command_line", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings4]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings3]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[None]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings1]", + 
"unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/runtime/test_pld.py::TestNonPLDModel::test_non_pld_model", + "unit/runtime/zero/test_zero_config.py::test_zero_config_deprecatedfields", + "unit/runtime/zero/test_zero_config.py::test_zero_config_aliasfields", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_output[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_input[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensorOutputOrdering::test_ckpt_non_tensor_output_ordering[non_tensor_output3]", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[Optimizer]", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[optimizer_type2]", + "unit/elasticity/test_elastic.py::test_proper_mbsz", "unit/runtime/pipe/test_topology.py::test_topology_rank_repr", + "unit/runtime/pipe/test_topology.py::test_topology_2d", "unit/runtime/pipe/test_topology.py::test_primes", + "unit/runtime/sparse_tensor/test_csr.py::test_csr_addition_different", + "unit/utils/test_get_optim_files.py::test_get_optim_files[2]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[12]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[24]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[1]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[gpt2-text-generation-fp16-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[gpt2-text-generation-fp16-noCG]", + "unit/comm/test_dist.py::TestDistInitNoEnv::test", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_throughput_calculation", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-zero2]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-zero1]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp32-fp32-zero1]", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[2]", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[3]", + "unit/runtime/zero/test_zero_context.py::TestGatherUpdate::test", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1]", + "unit/runtime/zero/test_zero_context.py::TestScatterGather::test", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False]", + 
"unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config0]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True]", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-EleutherAI/gpt-neo-125m-zero_stage=2-bsz=1]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_base[topo_config1]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config2]", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config1]", + "unit/runtime/half_precision/test_fp16.py::TestFP16OptimizerForMoE::test_unfused_gradnorm", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-facebook/opt-350m-zero_stage=3-bsz=1]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[facebook/opt-350m-fp16]" +] + +hpu_promote_tests = [ + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-False-resulting_optimizer9]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-False-resulting_optimizer3]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-True-resulting_optimizer13]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-False-resulting_optimizer1]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-True-resulting_optimizer7]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-True-resulting_optimizer5]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-True-resulting_optimizer15]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-False-resulting_optimizer11]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-False-resulting_optimizer2]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-False-resulting_optimizer8]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-False-resulting_optimizer0]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-True-resulting_optimizer14]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-True-resulting_optimizer12]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-True-resulting_optimizer4]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-True-resulting_optimizer6]", + 
"unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-False-resulting_optimizer10]", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[Adam]", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[FusedAdam]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-3]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-3]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[2]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[1]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[3]", + "unit/compression/test_compression.py::TestCompression::test_mpu_compress", + "unit/launcher/test_run.py::test_parser_errors", "unit/launcher/test_run.py::test_num_plus_parser", + "unit/launcher/test_run.py::test_parser_multinode", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-15]", + "unit/runtime/pipe/test_topology.py::test_topology_3d", + "unit/runtime/pipe/test_topology.py::test_topology_comm_list", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bfloat16]", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bf16]", + "unit/runtime/test_ds_config_dict.py::TestNoModel::test", + "unit/runtime/test_ds_config_dict.py::TestDeprecatedDeepScaleConfig::test", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs3[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs2[mask0]", + 
"unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs3[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs1_outputs1[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs1[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs1_outputs1[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs1[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs2[mask1]", + "unit/launcher/test_ds_arguments.py::test_core_deepscale_arguments", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments_no_ds_parser", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_missing_latest", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-scheduler_type2]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-scheduler_type2]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-scheduler_type2]", + "unit/runtime/utils/test_partition.py::test_balance_bert", + "unit/runtime/zero/test_zero_config.py::test_zero_config_offload_configs", + "unit/runtime/zero/test_zero_config.py::test_zero_offload_optimizer_config_pipeline", + "unit/runtime/test_pld.py::test_pld_schedule[0]", "unit/runtime/test_pld.py::test_pld_schedule[0.9]", + "unit/runtime/test_pld.py::test_pld_schedule[1.0]", "unit/runtime/test_pld.py::test_pld_schedule[0.1]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[1.0]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.9]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.1]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources3]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources1]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources2]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources0]", + "unit/elasticity/test_elastic.py::test_basic_10k", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_111", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_121", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict0]", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict1]", + 
"unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict2]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[1]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[1]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[3]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[facebook/opt-125m-text-generation-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[dslim/bert-base-NER-token-classification-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG]", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False-roberta-base-fill-mask]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]", + "unit/runtime/half_precision/test_bf16.py::TestZeroEmptyGrad::test", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-base-fill-mask-fp16-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]" +] + +hpu_promote_tests_4cards = [ + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-False-resulting_optimizer9]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-False-resulting_optimizer3]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-True-resulting_optimizer13]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-False-resulting_optimizer1]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-True-resulting_optimizer7]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-True-resulting_optimizer5]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-True-resulting_optimizer15]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-False-resulting_optimizer11]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-False-resulting_optimizer2]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-False-resulting_optimizer8]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-False-resulting_optimizer0]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-True-resulting_optimizer14]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-True-resulting_optimizer12]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-True-resulting_optimizer4]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-True-resulting_optimizer6]", + 
"unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-False-resulting_optimizer10]", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[Adam]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-3]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-3]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[2]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[1]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[3]", + "unit/compression/test_compression.py::TestCompression::test_mpu_compress", + "unit/launcher/test_run.py::test_parser_errors", "unit/launcher/test_run.py::test_num_plus_parser", + "unit/launcher/test_run.py::test_parser_multinode", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-10]", + "unit/runtime/pipe/test_topology.py::test_topology_3d", + "unit/runtime/pipe/test_topology.py::test_topology_comm_list", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bfloat16]", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bf16]", + "unit/runtime/test_ds_config_dict.py::TestNoModel::test", + "unit/runtime/test_ds_config_dict.py::TestDeprecatedDeepScaleConfig::test", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs3[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs1_outputs1[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs1[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs2[mask1]", + "unit/launcher/test_ds_arguments.py::test_core_deepscale_arguments", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments_no_ds_parser", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_missing_latest", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-scheduler_type2]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-scheduler_type2]", + 
"unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-scheduler_type2]", + "unit/runtime/utils/test_partition.py::test_balance_bert", + "unit/runtime/zero/test_zero_config.py::test_zero_config_offload_configs", + "unit/runtime/zero/test_zero_config.py::test_zero_offload_optimizer_config_pipeline", + "unit/runtime/test_pld.py::test_pld_schedule[0]", "unit/runtime/test_pld.py::test_pld_schedule[0.9]", + "unit/runtime/test_pld.py::test_pld_schedule[1.0]", "unit/runtime/test_pld.py::test_pld_schedule[0.1]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[1.0]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources3]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources1]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources2]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources0]", + "unit/elasticity/test_elastic.py::test_basic_10k", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_111", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_121", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict0]", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict1]", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict2]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[1]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[1]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[3]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[facebook/opt-125m-text-generation-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[dslim/bert-base-NER-token-classification-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG]", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False-roberta-base-fill-mask]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]", + "unit/runtime/half_precision/test_bf16.py::TestZeroEmptyGrad::test", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-base-fill-mask-fp16-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[None-fp16-zero1]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[None-bf16-zero2]", + 
"unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp32-bf16-None]", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[1]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_base[topo_config2]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[bigscience/bloom-560m-fp16]", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-bigscience/bloom-560m-zero_stage=3-bsz=1]", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=2-bsz=1]", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=2-bsz=1]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[facebook/opt-125m-fp16]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[EleutherAI/gpt-neo-125M-fp16]" +] + +gpu_ci_tests = [ + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[None]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_train_schedule_singlestage", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_schedule_firststage", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[1]", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments", + "unit/launcher/test_ds_arguments.py::test_no_ds_enable_argument", + "unit/runtime/test_ds_config_model.py::test_config_base", + "unit/comm/test_dist.py::TestWorldSizeOverrideDistTest::test_world_size_1", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_211", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_122", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[2-2]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[33-33]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[0-0]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[1-1]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[32-32]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[5-5]", + "unit/runtime/half_precision/test_bf16.py::TestZeroAllowUntestedOptimizer::test", + 
"unit/runtime/test_ds_config_dict.py::TestInitNoOptimizer::test", + "unit/runtime/test_ds_config_dict.py::TestDistInit::test", "unit/launcher/test_run.py::test_parser_local", + "unit/launcher/test_run.py::test_parser_mutual_exclusive", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupLR-params0]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[OneCycle-params2]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[LRRangeTest-params3]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupDecayLR-params1]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-100]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0.1-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-211]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.1-0-10-0]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-210]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-101]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-100]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-101]", + "unit/runtime/utils/test_partition.py::test_float_balanced", + "unit/runtime/utils/test_partition.py::test_int_balanced", + "unit/runtime/utils/test_partition.py::test_easy_balance_uniform", + "unit/runtime/utils/test_partition.py::test_float_midheavy", + "unit/runtime/utils/test_partition.py::test_short_partition_uniform", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings2]", + "unit/autotuning/test_autotuning.py::test_command_line", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings4]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings3]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[None]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings1]", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/runtime/test_pld.py::TestNonPLDModel::test_non_pld_model", + "unit/runtime/zero/test_zero_config.py::test_zero_config_deprecatedfields", + "unit/runtime/zero/test_zero_config.py::test_zero_config_aliasfields", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_output[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_input[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensorOutputOrdering::test_ckpt_non_tensor_output_ordering[non_tensor_output3]", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[Optimizer]", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[optimizer_type2]", + "unit/elasticity/test_elastic.py::test_proper_mbsz", "unit/runtime/pipe/test_topology.py::test_topology_rank_repr", + "unit/runtime/pipe/test_topology.py::test_topology_2d", "unit/runtime/pipe/test_topology.py::test_primes", + "unit/runtime/sparse_tensor/test_csr.py::test_csr_addition_different", + "unit/utils/test_get_optim_files.py::test_get_optim_files[2]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[12]", 
+ "unit/utils/test_get_optim_files.py::test_get_optim_files[24]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[1]", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/inference/test_inference.py::TestModelTask::test[gpt2-text-generation-fp16-CG]", + "unit/inference/test_inference.py::TestModelTask::test[gpt2-text-generation-fp16-noCG]" +] + +gpu_promote_tests = [ + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-False-resulting_optimizer9]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-False-resulting_optimizer3]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-True-resulting_optimizer13]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-False-resulting_optimizer1]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-True-resulting_optimizer7]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-True-resulting_optimizer5]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-True-resulting_optimizer15]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-False-resulting_optimizer11]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-False-resulting_optimizer2]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-False-resulting_optimizer8]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-False-resulting_optimizer0]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-True-resulting_optimizer14]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-True-resulting_optimizer12]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-True-resulting_optimizer4]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-True-resulting_optimizer6]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-False-resulting_optimizer10]", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[Adam]", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[FusedAdam]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-3]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-3]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[2]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[1]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[3]", + "unit/compression/test_compression.py::TestCompression::test_mpu_compress", + "unit/launcher/test_run.py::test_parser_errors", "unit/launcher/test_run.py::test_num_plus_parser", + 
"unit/launcher/test_run.py::test_parser_multinode", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-15]", + "unit/runtime/pipe/test_topology.py::test_topology_3d", + "unit/runtime/pipe/test_topology.py::test_topology_comm_list", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bfloat16]", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bf16]", + "unit/runtime/test_ds_config_dict.py::TestNoModel::test", + "unit/runtime/test_ds_config_dict.py::TestDeprecatedDeepScaleConfig::test", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs3[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs2[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs3[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs1_outputs1[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs1[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs1_outputs1[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs1[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs2[mask1]", + "unit/launcher/test_ds_arguments.py::test_core_deepscale_arguments", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments_no_ds_parser", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_missing_latest", + 
"unit/compression/test_compression.py::TestCompression::test_conv1d_convertion", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-scheduler_type2]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-scheduler_type2]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-scheduler_type2]", + "unit/runtime/utils/test_partition.py::test_balance_bert", + "unit/runtime/zero/test_zero_config.py::test_zero_config_offload_configs", + "unit/runtime/zero/test_zero_config.py::test_zero_offload_optimizer_config_pipeline", + "unit/runtime/test_pld.py::test_pld_schedule[0]", "unit/runtime/test_pld.py::test_pld_schedule[0.9]", + "unit/runtime/test_pld.py::test_pld_schedule[1.0]", "unit/runtime/test_pld.py::test_pld_schedule[0.1]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[1.0]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.9]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.1]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources3]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources1]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources2]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources0]", + "unit/elasticity/test_elastic.py::test_basic_10k", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_111", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_121", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict0]", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict1]", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict2]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[1]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[1]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[3]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG]", + 
"unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG]", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False-roberta-base-fill-mask]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]", + "unit/runtime/half_precision/test_bf16.py::TestZeroEmptyGrad::test", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp16-CG]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]" +] diff --git a/tests/unit/common.py b/tests/unit/common.py index 76bebf6b725a..bd2d519d65e7 100644 --- a/tests/unit/common.py +++ b/tests/unit/common.py @@ -21,9 +21,10 @@ import pytest from _pytest.outcomes import Skipped from _pytest.fixtures import FixtureLookupError, FixtureFunctionMarker +from unit.util import hpu_lazy_enabled # Worker timeout for tests that hang -DEEPSPEED_TEST_TIMEOUT = 600 +DEEPSPEED_TEST_TIMEOUT = int(os.environ.get('DEEPSPEED_TEST_TIMEOUT', '600')) def is_rocm_pytorch(): @@ -59,6 +60,7 @@ def get_master_port(base_port=29500, port_range_size=1000): def set_accelerator_visible(): + # below function relevant for GPU cuda_visible = os.environ.get("CUDA_VISIBLE_DEVICES", None) xdist_worker_id = get_xdist_worker_id() if xdist_worker_id is None: @@ -84,13 +86,25 @@ def set_accelerator_visible(): elif get_accelerator().device_name() == 'npu': npu_smi = subprocess.check_output(['npu-smi', 'info', '-l']) num_accelerators = int(npu_smi.decode('utf-8').strip().split('\n')[0].split(':')[1].strip()) + elif get_accelerator().device_name() == 'hpu': + try: + hl_smi = subprocess.check_output(['hl-smi', "-L"]) + num_accelerators = re.findall(r"Module ID\s+:\s+(\d+)", hl_smi.decode()) + except FileNotFoundError: + sim_list = subprocess.check_output(['ls', '-1', '/dev/accel']) + num_accelerators = re.findall(r"accel(\d+)", sim_list.decode()) + num_accelerators = sorted(num_accelerators, key=int) + os.environ["HABANA_VISIBLE_MODULES"] = ",".join(num_accelerators) else: assert get_accelerator().device_name() == 'cpu' cpu_sockets = int( subprocess.check_output('cat /proc/cpuinfo | grep "physical id" | sort -u | wc -l', shell=True)) num_accelerators = cpu_sockets - cuda_visible = ",".join(map(str, range(num_accelerators))) + if isinstance(num_accelerators, list): + cuda_visible = ",".join(num_accelerators) + else: + cuda_visible = ",".join(map(str, range(num_accelerators))) # rotate list based on xdist worker id, example below # wid=0 -> ['0', '1', '2', '3'] @@ -114,6 +128,9 @@ class DistributedExec(ABC): requires_cuda_env = True reuse_dist_env = False non_daemonic_procs = False + #WA to SW-181871, until it is not fixed set non_daemonic_procs to True for eager mode. 
+ if get_accelerator().device_name() == 'hpu': + non_daemonic_procs = not hpu_lazy_enabled() _pool_cache = {} exec_timeout = DEEPSPEED_TEST_TIMEOUT @@ -147,6 +164,10 @@ def _get_fixture_kwargs(self, request, func): return fixture_kwargs def _launch_daemonic_procs(self, num_procs): + if get_accelerator().device_name() == 'hpu': + if self.reuse_dist_env: + print("Ignoring reuse_dist_env for hpu") + self.reuse_dist_env = False # Create process pool or use cached one master_port = None if self.reuse_dist_env: @@ -170,8 +191,9 @@ def _launch_daemonic_procs(self, num_procs): # hang (causing super long unit test runtimes) pytest.exit("Test hanged, exiting", returncode=1) - # Tear down distributed environment and close process pools - self._close_pool(pool, num_procs) + finally: + # Tear down distributed environment and close process pools + self._close_pool(pool, num_procs) # If we skipped a test, propagate that to this process if any(skip_msgs): @@ -179,6 +201,12 @@ def _launch_daemonic_procs(self, num_procs): pytest.skip(skip_msgs[0]) def _launch_non_daemonic_procs(self, num_procs): + #WA to SW-181871 + if get_accelerator().device_name() == 'hpu': + if self.reuse_dist_env: + print("Ignoring reuse_dist_env for hpu") + self.reuse_dist_env = False + assert not self.reuse_dist_env, "Cannot reuse distributed environment with non-daemonic processes" master_port = get_master_port() @@ -284,6 +312,7 @@ def _dist_run(self, local_rank, num_procs, master_port, skip_msg=""): def _dist_destroy(self): if (dist is not None) and dist.is_initialized(): dist.barrier() + # tear down after test completes dist.destroy_process_group() def _close_pool(self, pool, num_procs, force=False): diff --git a/tests/unit/compile_marker.py b/tests/unit/compile_marker.py new file mode 100644 index 000000000000..f546ac313fb8 --- /dev/null +++ b/tests/unit/compile_marker.py @@ -0,0 +1,139 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +compile_tests = [ + "unit/runtime/compile/test_compile_wrapper.py::TestCustomMethod::test_custom_function", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-2-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-1-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-2-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-1-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-1-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-2-dtype0]", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_custom_backend", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compiler_fn", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_disabled", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_kwargs", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compile_kwargs", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest[True]", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_missing_latest[True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-True]",
+ "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-True]", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[0-True]", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-True]", + "unit/checkpoint/test_shared_weights.py::TestCheckpointSharedWeights::test_checkpoint_shared_weights[True]", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-True]", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-True]", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-True]", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[WARN-True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[IGNORE-True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[FAIL-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-True]", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[0-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-True]", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeRONonDistributed::test_chmod_exception_handling[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeRONonDistributed::test_chmod_exception_handling[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeRONonDistributed::test_chmod_exception_handling[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-True]", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest[True]", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_missing_latest[True]", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fp32_optimizer[True]", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[WARN-True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[FAIL-True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[IGNORE-True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unknown_tag_validation[True]", +] diff --git a/tests/unit/elasticity/test_elastic.py 
b/tests/unit/elasticity/test_elastic.py index a49ec595a420..dbd4f40c9b95 100644 --- a/tests/unit/elasticity/test_elastic.py +++ b/tests/unit/elasticity/test_elastic.py @@ -9,7 +9,7 @@ from deepspeed.git_version_info import version as ds_version import os from unit.simple_model import SimpleModel -from deepspeed.ops.op_builder import FusedAdamBuilder +from deepspeed.ops.op_builder import FusedAdamBuilder, FusedLambBuilder if not deepspeed.ops.__compatible_ops__[FusedAdamBuilder.NAME]: pytest.skip("This op had not been implemented on this system.", allow_module_level=True) @@ -150,6 +150,7 @@ def test_proper_mbsz(ds_config): class TestNonElasticBatchParams(DistributedTest): world_size = 2 + @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible") def test(self): config_dict = { "train_batch_size": 2, @@ -182,6 +183,7 @@ def test(self): class TestNonElasticBatchParamsWithOverride(DistributedTest): world_size = 2 + @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible") def test(self): config_dict = { "train_batch_size": 2, @@ -213,6 +215,7 @@ def test(self): class TestElasticConfigChanged(DistributedTest): world_size = 2 + @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible") def test(self): config_dict = { "train_batch_size": 2, diff --git a/tests/unit/hybrid_engine/test_he_all.py b/tests/unit/hybrid_engine/test_he_all.py index aa1f120645b1..af336ebf8c2f 100644 --- a/tests/unit/hybrid_engine/test_he_all.py +++ b/tests/unit/hybrid_engine/test_he_all.py @@ -43,8 +43,10 @@ def get_model(self, model_name): model_config = AutoConfig.from_pretrained(model_name) model_config.dropout = 0.0 model = AutoModelForCausalLM.from_pretrained(model_name, config=model_config) - model = model.half() - model = model.to(f'{get_accelerator().device_name()}:{local_rank}') + dev = get_accelerator().device_name() + dtype = torch.float16 + model = model.to(dtype=dtype) + model = model.to(f'{dev}:{local_rank}') return model def get_tokenizer(self, model_name): @@ -70,8 +72,8 @@ def test_correctness(self, batch_size, model_name): base_out = self._generate(model, tokenizer, prompt) ds_config = {"train_batch_size": 1, "fp16": {"enabled": True}, "hybrid_engine": {"enabled": True}} - model, *_ = deepspeed.initialize(model=model, config=ds_config) + model, *_ = deepspeed.initialize(model=model, config=ds_config) model.eval() ds1_out = self._generate(model, tokenizer, prompt) assert base_out == ds1_out, f"base_out: {base_out}, ds1_out: {ds1_out}" diff --git a/tests/unit/hybrid_engine/test_he_llama.py b/tests/unit/hybrid_engine/test_he_llama.py index fcf5b8ffb89b..716c327d53af 100644 --- a/tests/unit/hybrid_engine/test_he_llama.py +++ b/tests/unit/hybrid_engine/test_he_llama.py @@ -46,8 +46,10 @@ def get_model(self, model_name): model = AutoModelForCausalLM.from_pretrained(model_name, config=model_config) # Make the model smaller so we can run it on a single GPU in CI _ = [model.model.layers.pop(-1) for _ in range(8)] - model = model.half() - model = model.to(f'{get_accelerator().device_name()}:{local_rank}') + dev = get_accelerator().device_name() + dtype = torch.float16 + model = model.to(dtype=dtype) + model = model.to(f'{dev}:{local_rank}') return model def get_tokenizer(self, model_name): diff --git a/tests/unit/hybrid_engine/test_he_lora.py b/tests/unit/hybrid_engine/test_he_lora.py index ea27239ed55e..0108d285d46f 100644 --- 
a/tests/unit/hybrid_engine/test_he_lora.py +++ b/tests/unit/hybrid_engine/test_he_lora.py @@ -15,6 +15,7 @@ import numpy.testing as npt from unit.common import DistributedTest from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.accelerator import get_accelerator if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("This op had not been implemented on this system.", allow_module_level=True) @@ -119,13 +120,17 @@ def only_optimize_lora_parameters(model): class TestHybridEngineLoRA(DistributedTest): world_size = 1 - def get_model(self, model_name): + def get_model(self, model_name, replace_fp16_to_bf16=False): local_rank = int(os.getenv("LOCAL_RANK", "0")) model_config = AutoConfig.from_pretrained(model_name) model_config.dropout = 0.0 model = AutoModelForCausalLM.from_pretrained(model_name, config=model_config) - model = model.half() - model = model.to(f'cuda:{local_rank}') + if replace_fp16_to_bf16: + model = model.bfloat16() + else: + model = model.half() + device = get_accelerator().device_name() + model = model.to(f'{device}:{local_rank}') return model def get_tokenizer(self, model_name): @@ -146,8 +151,9 @@ def get_train_sentences(self, batch_size): raise NotImplementedError(f"batch_size {batch_size} not implemented") def test_lora(self, batch_size, model_name, zero_stage, offload_device): + replace_fp16_to_bf16 = False local_rank = int(os.getenv("LOCAL_RANK", "0")) - model = self.get_model(model_name) + model = self.get_model(model_name, replace_fp16_to_bf16) tokenizer = self.get_tokenizer(model_name) train_sentences = self.get_train_sentences(batch_size) @@ -180,6 +186,9 @@ def test_lora(self, batch_size, model_name, zero_stage, offload_device): } } + if replace_fp16_to_bf16: + ds_config["fp16"]["enabled"] = False + ds_config["bf16"] = {"enabled": True} model, *_ = deepspeed.initialize(model=model, config=ds_config) # Verify gradient norm is larger than 0 @@ -190,7 +199,8 @@ def test_lora(self, batch_size, model_name, zero_stage, offload_device): model.train() batch = tokenizer(train_sentences, max_length=16, padding="max_length", truncation=True, return_tensors="pt") - batch = to_device(batch, f'cuda:{local_rank}') + device = get_accelerator().device_name() + batch = to_device(batch, f'{device}:{local_rank}') batch["labels"] = batch["input_ids"] outputs = model(**batch, use_cache=False) loss = outputs.loss diff --git a/tests/unit/inference/test_checkpoint_sharding.py b/tests/unit/inference/test_checkpoint_sharding.py index 564b3fab6bf4..dc4614cfae3e 100644 --- a/tests/unit/inference/test_checkpoint_sharding.py +++ b/tests/unit/inference/test_checkpoint_sharding.py @@ -14,6 +14,7 @@ from huggingface_hub import snapshot_download from transformers.utils import is_offline_mode from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.accelerator import get_accelerator if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("This op had not been implemented on this system.", allow_module_level=True) @@ -44,6 +45,8 @@ def model_name(request): @pytest.fixture(params=[torch.float16, torch.int8], ids=["fp16", "int8"]) def dtype(request): + if request.param not in get_accelerator().supported_dtypes(): + pytest.skip(f"{request.param} not supported by {get_accelerator().device_name()}.") return request.param @@ -74,6 +77,7 @@ class TestCheckpointShard(DistributedTest): world_size = 2 def test(self, model_name, dtype, class_tmpdir, save_shard): + world_size = int(os.getenv("WORLD_SIZE", "1")) inf_config = { 
"replace_with_kernel_inject": True, diff --git a/tests/unit/inference/test_inference.py b/tests/unit/inference/test_inference.py index f3056a225a9b..7a34cb3acdee 100644 --- a/tests/unit/inference/test_inference.py +++ b/tests/unit/inference/test_inference.py @@ -6,23 +6,19 @@ import pytest import itertools -import pickle import os import time -from dataclasses import dataclass -from typing import List - +import json import deepspeed import torch -from huggingface_hub import HfApi +from huggingface_hub import HfApi, hf_api from packaging import version as pkg_version from torch import nn from transformers import pipeline from transformers.models.t5.modeling_t5 import T5Block from transformers.models.roberta.modeling_roberta import RobertaLayer - from deepspeed.accelerator import get_accelerator from deepspeed.git_version_info import torch_info from deepspeed.model_implementations import DeepSpeedTransformerInference @@ -30,6 +26,7 @@ from deepspeed.ops.op_builder import OpBuilder from unit.common import DistributedTest +from transformers import BertLayer rocm_version = OpBuilder.installed_rocm_version() if rocm_version != (0, 0): @@ -67,65 +64,41 @@ "facebook/opt-125m", # 125m, 1.7B, ..., 175B variants have the same model architecture. "facebook/opt-350m", # 350m applies layer norm after attention layer which is different than other variants. ] +if os.getenv("TRANSFORMERS_OFFLINE", default=None): + if os.getenv("HF_HOME", default=None): + model_info_f = os.path.join(os.getenv("HF_HOME", default=None), 'model_info.json') + with open(model_info_f, 'r') as f: + data = json.load(f) + _all_models = [hf_api.ModelInfo(**x) for x in data] + else: + assert 1 +elif os.getenv("STORE_HF", default=None): + if os.getenv("HF_HOME", default=None): + _all_models = list(HfApi().list_models()) + all_models_info = [model_info.__dict__ for model_info in _all_models] + json_object = json.dumps(all_models_info, indent=4) + model_info_f = os.path.join(os.getenv("HF_HOME", default=None), 'model_info.json') + with open(model_info_f, 'w') as f: + f.write(json_object) +else: + _all_models = list(HfApi().list_models()) + _test_models = set(_bert_models + _roberta_models + _gpt_models + _opt_models) +_hf_model_names = [m.modelId for m in _all_models] _test_tasks = [ "fill-mask", "question-answering", "text-classification", "token-classification", "text-generation", "text2text-generation", "summarization", "translation" ] - - -@dataclass -class ModelInfo: - modelId: str - pipeline_tag: str - tags: List[str] - - -def _hf_model_list() -> List[ModelInfo]: - """ Caches HF model list to avoid repeated API calls """ - - cache_dir = os.getenv("TRANSFORMERS_CACHE", "~/.cache/huggingface") - cache_file_path = os.path.join(cache_dir, "DS_model_cache.pkl") - cache_expiration_seconds = 60 * 60 * 24 # 1 day - - # Load or initialize the cache - model_data = {"cache_time": 0, "model_list": []} - if os.path.isfile(cache_file_path): - with open(cache_file_path, 'rb') as f: - model_data = pickle.load(f) - - current_time = time.time() - - # Update the cache if it has expired - if (model_data["cache_time"] + cache_expiration_seconds) < current_time: - api = HfApi() - model_data["model_list"] = [ - ModelInfo(modelId=m.modelId, pipeline_tag=m.pipeline_tag, tags=m.tags) for m in api.list_models() - ] - model_data["cache_time"] = current_time - - # Save the updated cache - os.makedirs(cache_dir, exist_ok=True) - with open(cache_file_path, 'wb') as f: - pickle.dump(model_data, f) - - return model_data["model_list"] - - -# Get a list of all 
models and mapping from task to supported models -_hf_models = _hf_model_list() -_hf_model_names = [m.modelId for m in _hf_models] -_hf_task_to_models = {task: [m.modelId for m in _hf_models if m.pipeline_tag == task] for task in _test_tasks} - +_hf_task_to_models = {task: [m.modelId for m in _all_models if m.pipeline_tag == task] for task in _test_tasks} # Get all combinations of task:model to test _model_w_tasks = [(m, t) for m, t in itertools.product(*[_test_models, _test_tasks]) if m in _hf_task_to_models[t]] - # Assign to pytest variables for testing pytest.model_w_tasks = _model_w_tasks pytest.mt_names = [f"{m}-{t}" for m, t in pytest.model_w_tasks] -@pytest.fixture(scope="module", autouse=True) +# WA: the Hugging Face model listings were updated, so some entries in _test_models are no longer found in _hf_model_names; the fixture's autouse flag was therefore changed from True to False. +@pytest.fixture(scope="module", autouse=False) def verify_models(): # Verify all test models are registered in HF _test_models_not_found = [m for m in _test_models if m not in _hf_model_names] @@ -147,8 +120,17 @@ def model_w_task(request): return request.param -@pytest.fixture(params=[torch.float, torch.half], ids=["fp32", "fp16"]) +dtype_params = [torch.float, torch.half] +dtype_ids = ["fp32", "fp16"] +if get_accelerator().is_bf16_supported(): + dtype_params.append(torch.bfloat16) + dtype_ids.append("bf16") + + +@pytest.fixture(params=dtype_params, ids=dtype_ids) def dtype(request): + if request.param not in get_accelerator().supported_dtypes(): + pytest.skip(f"{request.param} not supported by {get_accelerator().device_name()}.") return request.param @@ -279,7 +261,7 @@ def verify_injection(module): def validate_test(model_w_task, dtype, enable_cuda_graph, enable_triton): model, task = model_w_task msg = "" - if enable_cuda_graph and (torch_info["cuda_version"] == "0.0"): + if enable_cuda_graph and (torch_info["cuda_version"] == "0.0") and get_accelerator().device_name() != 'hpu': msg = "CUDA not detected, cannot use CUDA Graph" elif enable_cuda_graph and pkg_version.parse(torch.__version__) < pkg_version.parse("1.10"): msg = "CUDA Graph is only available in torch versions >= 1.10" @@ -296,6 +278,8 @@ def validate_test(model_w_task, dtype, enable_cuda_graph, enable_triton): msg = f"Bloom models only support half precision, cannot use dtype {dtype}" elif (model not in _bert_models + _roberta_models) and enable_cuda_graph: msg = "Non bert/roberta models do no support CUDA Graph" + elif not get_accelerator().is_triton_supported() and enable_triton: + msg = f"Triton is not supported for {get_accelerator().device_name()}."
elif enable_triton and not (dtype in [torch.half]): msg = "Triton is for fp16" elif enable_triton and not deepspeed.HAS_TRITON: @@ -311,7 +295,7 @@ def validate_test(model_w_task, dtype, enable_cuda_graph, enable_triton): return msg -@pytest.mark.inference +@pytest.mark.nightly class TestModelTask(DistributedTest): world_size = 1 @@ -340,7 +324,7 @@ def test( local_rank = int(os.getenv("LOCAL_RANK", "0")) # Load the model on CPU first to avoid OOM for large models @fp32 - pipe = pipeline(task, model=model, device=torch.device("cpu"), framework="pt") + pipe = pipeline(task, model=model, device=torch.device("cpu"), framework="pt", torch_dtype=dtype) if dtype == torch.half: pipe.model.half() @@ -397,6 +381,96 @@ def test( assert assert_fn(bs_output, ds_output) +@pytest.mark.skipif(get_accelerator().device_name() != 'hpu', reason="Kernel Inject False validation for HPU tests.") +@pytest.mark.nightly +class TestModelTaskKIFalse(DistributedTest): + world_size = 1 + + def test( + self, + model_w_task, + dtype, + enable_cuda_graph, + enable_triton, + query, + inf_kwargs, + assert_fn, + perf_meas=True, + ): + invalid_test_msg = validate_test(model_w_task, dtype, enable_cuda_graph, enable_triton) + if invalid_test_msg: + pytest.skip(invalid_test_msg) + + model, task = model_w_task + local_rank = int(os.getenv("LOCAL_RANK", "0")) + + # Load the model on CPU first to avoid OOM for large models @fp32 + if dtype == torch.bfloat16: + pipe = pipeline(task, model=model, device=torch.device("cpu"), framework="pt", torch_dtype=torch.bfloat16) + else: + pipe = pipeline(task, model=model, device=torch.device("cpu"), framework="pt") + if dtype == torch.half: + pipe.model.half() + + # Switch device to GPU/HPU after converting to half + device = torch.device(get_accelerator().device_name(local_rank)) + pipe.device = device + pipe.model.to(device) + + # Warm-up queries for perf measurement + #for i in range(10): + # _ = pipe(query, **inf_kwargs) + get_accelerator().synchronize() + start = time.time() + bs_output = pipe(query, **inf_kwargs) + get_accelerator().synchronize() + bs_time = time.time() - start + injection_policy = {BertLayer: ("output.dense", )} + if "facebook/opt" in model or "Norod78" in model: + injection_policy = {BertLayer: ("out_proj", )} + if "gpt2" in model or "EleutherAI" in model or "bigscience/bloom" in model: + injection_policy = {BertLayer: ("mlp", )} + if "distilbert" in model: + injection_policy = {BertLayer: ("output_layer_norm", )} + args = { + 'mp_size': 1, + 'dtype': dtype, + 'replace_with_kernel_inject': False, + 'enable_cuda_graph': enable_cuda_graph, + 'use_triton': enable_triton, + 'triton_autotune': False, + 'injection_policy': injection_policy, + } + if pipe.tokenizer.model_max_length < deepspeed.ops.transformer.inference.config.DeepSpeedInferenceConfig( + ).max_out_tokens: + args.update({'max_out_tokens': pipe.tokenizer.model_max_length}) + pipe.model = deepspeed.init_inference(pipe.model, **args) + # Warm-up queries for perf measurement + #for i in range(10): + # _ = pipe(query, **inf_kwargs) + get_accelerator().synchronize() + start = time.time() + ds_output = pipe(query, **inf_kwargs) + get_accelerator().synchronize() + ds_time = time.time() - start + + if perf_meas: + print( + f"model={model}, task={task}, dtype={dtype}, cuda_graph={enable_cuda_graph}, triton={enable_triton}, bs_time={bs_time}, ds_time={ds_time}" + ) + + # facebook/opt* and some bigscient/bloom* models are not matching + # baseline exactly, adding an exception to them for now + if ("opt" in model) or 
("bloom" in model): + bs_output = pipe(query, **inf_kwargs) + + # These performance tests are only measuring the time for a single + # inference request, we just want to check that performance isn't terrible + #assert ds_time <= (bs_time * 1.1) + + assert assert_fn(bs_output, ds_output) + + @pytest.mark.seq_inference @pytest.mark.parametrize("model_w_task", [("EleutherAI/gpt-neo-1.3B", "text-generation"), ("EleutherAI/gpt-neox-20b", "text-generation"), @@ -428,7 +502,6 @@ def test( # enough GPU memory pipe = pipeline(task, model=model, device=torch.device("cpu"), framework="pt") bs_output = pipe(query, **inf_kwargs) - pipe.model = deepspeed.init_inference(pipe.model, mp_size=self.world_size, dtype=dtype, @@ -457,12 +530,16 @@ def test( ): model, task = model_w_task dtype = torch.float16 + if os.getenv("REPLACE_FP16", default=None): + dtype = torch.bfloat16 if dtype not in get_accelerator().supported_dtypes(): pytest.skip(f"Acceleraor {get_accelerator().device_name()} does not support {dtype}.") local_rank = int(os.getenv("LOCAL_RANK", "0")) - - pipe = pipeline(task, model=model, model_kwargs={"low_cpu_mem_usage": True}, device=local_rank, framework="pt") + device = local_rank + if get_accelerator().device_name() != 'cuda': + device = torch.device(f"{get_accelerator().device_name()}") + pipe = pipeline(task, model=model, model_kwargs={"low_cpu_mem_usage": True}, device=device, framework="pt") bs_output = pipe(query, **inf_kwargs) pipe.model = deepspeed.init_inference(pipe.model, mp_size=self.world_size, @@ -514,7 +591,6 @@ def test( device=torch.device(get_accelerator().device_name(local_rank)), framework="pt") bs_output = pipe(query, **inf_kwargs) - pipe.model = deepspeed.init_inference(pipe.model, mp_size=world_size, dtype=dtype, @@ -648,13 +724,22 @@ def no_pool_bootstrap_stderr(f, xs, iters): if 'gpt-j-6b' in model_name: dtype = torch.half + if os.getenv("REPLACE_FP16", default=None): + dtype = torch.bfloat16 lm = lm_eval.models.get_model(model_family).create_from_arg_string(f"pretrained={model_name}", {"device": "cpu"}) - setattr(lm, model_family, getattr(lm, model_family).half().to(device)) + setattr(lm, model_family, getattr(lm, model_family).to(dtype=dtype).to(device)) lm._device = device else: - lm = lm_eval.models.get_model(model_family).create_from_arg_string( - f"pretrained={model_name}", {"device": get_accelerator().device_name()}) + if get_accelerator().device_name() == 'hpu': + #lm_eval not supporting HPU device, so get model with CPU and move it to HPU. 
+ lm = lm_eval.models.get_model(model_family).create_from_arg_string(f"pretrained={model_name}", + {"device": "cpu"}) + setattr(lm, model_family, getattr(lm, model_family).to(device)) + lm._device = device + else: + lm = lm_eval.models.get_model(model_family).create_from_arg_string( + f"pretrained={model_name}", {"device": get_accelerator().device_name()}) get_accelerator().synchronize() start = time.time() diff --git a/tests/unit/inference/test_inference_config.py b/tests/unit/inference/test_inference_config.py index 39d62d17372c..7172ee2a7f21 100644 --- a/tests/unit/inference/test_inference_config.py +++ b/tests/unit/inference/test_inference_config.py @@ -17,7 +17,6 @@ class TestInferenceConfig(DistributedTest): def test_overlap_kwargs(self): config = {"replace_with_kernel_inject": True, "dtype": torch.float32} kwargs = {"replace_with_kernel_inject": True} - engine = deepspeed.init_inference(torch.nn.Module(), config=config, **kwargs) assert engine._config.replace_with_kernel_inject diff --git a/tests/unit/inference/test_model_profiling.py b/tests/unit/inference/test_model_profiling.py index 23e49f89025b..9ae96ed12fcf 100644 --- a/tests/unit/inference/test_model_profiling.py +++ b/tests/unit/inference/test_model_profiling.py @@ -16,6 +16,9 @@ if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("This op had not been implemented on this system.", allow_module_level=True) +if torch.half not in get_accelerator().supported_dtypes(): + pytest.skip(f"fp16 not supported, valid dtype: {get_accelerator().supported_dtypes()}", allow_module_level=True) + @pytest.mark.inference @pytest.mark.parametrize("use_cuda_events", [True, False]) @@ -31,7 +34,6 @@ def test(self, enable_cuda_graph, use_cuda_events): local_rank = int(os.getenv("LOCAL_RANK", "0")) world_size = int(os.getenv("WORLD_SIZE", "1")) - pipe = pipeline(task, model, framework="pt", device=get_accelerator().device_name(local_rank)) pipe.model = deepspeed.init_inference(pipe.model, dtype=dtype, diff --git a/tests/unit/inference/test_stable_diffusion.py b/tests/unit/inference/test_stable_diffusion.py index ac39b7ab12fa..d71d947c3b4b 100644 --- a/tests/unit/inference/test_stable_diffusion.py +++ b/tests/unit/inference/test_stable_diffusion.py @@ -20,22 +20,23 @@ class TestStableDiffusion(DistributedTest): def test(self): from diffusers import DiffusionPipeline from image_similarity_measures.quality_metrics import rmse - generator = torch.Generator(device=get_accelerator().current_device()) + dev = get_accelerator().device_name() + generator = torch.Generator(device=dev) seed = 0xABEDABE7 generator.manual_seed(seed) prompt = "a dog on a rocket" model = "prompthero/midjourney-v4-diffusion" local_rank = int(os.getenv("LOCAL_RANK", "0")) - device = torch.device(f"cuda:{local_rank}") - - pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.half) + device = torch.device(f"{dev}:{local_rank}") + dtype = torch.half + pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=dtype) pipe = pipe.to(device) baseline_image = pipe(prompt, guidance_scale=7.5, generator=generator).images[0] pipe = deepspeed.init_inference( pipe, mp_size=1, - dtype=torch.half, + dtype=dtype, replace_with_kernel_inject=True, enable_cuda_graph=True, ) diff --git a/tests/unit/inference/v2/inference_test_utils.py b/tests/unit/inference/v2/inference_test_utils.py index d63c51267e51..9405b6fde724 100644 --- a/tests/unit/inference/v2/inference_test_utils.py +++ b/tests/unit/inference/v2/inference_test_utils.py @@ -44,3 +44,10 @@ def allclose(x, 
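The stable-diffusion test now derives both the RNG generator and the target device from the accelerator name instead of a hardcoded "cuda:N" string. Roughly, the pattern looks like this (seed value taken from the test, the rest illustrative):

import os

import torch
from deepspeed.accelerator import get_accelerator

# Derive the generator and the target device from the accelerator name so the
# same test body runs on cuda, hpu, etc.
dev = get_accelerator().device_name()
local_rank = int(os.getenv("LOCAL_RANK", "0"))

generator = torch.Generator(device=dev)
generator.manual_seed(0xABEDABE7)
device = torch.device(f"{dev}:{local_rank}")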
y, tolerances: Tuple[int, int] = None): else: rtol, atol = tolerances return torch.allclose(x, y, rtol=rtol, atol=atol) + + +def skip_on_inference_v2(): + if get_accelerator().device_name() == 'hpu': + return True + else: + return False diff --git a/tests/unit/inference/v2/kernels/core_ops/test_bias_activation.py b/tests/unit/inference/v2/kernels/core_ops/test_bias_activation.py index 376188b92565..49dbdc715556 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_bias_activation.py +++ b/tests/unit/inference/v2/kernels/core_ops/test_bias_activation.py @@ -11,7 +11,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.inference_utils import ActivationType, DtypeEnum from deepspeed.inference.v2.kernels.core_ops import CUDABiasActivation -from ....v2.inference_test_utils import get_dtypes, allclose +from ....v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_bias_act_implementation(input: torch.Tensor, bias: Optional[torch.Tensor], diff --git a/tests/unit/inference/v2/kernels/core_ops/test_blas_linear.py b/tests/unit/inference/v2/kernels/core_ops/test_blas_linear.py index 864db6204a16..9d8d2c177607 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_blas_linear.py +++ b/tests/unit/inference/v2/kernels/core_ops/test_blas_linear.py @@ -10,7 +10,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.core_ops import BlasLibLinear -from ....v2.inference_test_utils import allclose +from ....v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') # Note: only testing with FP16 and BF16 because we use TF32 on Ampere and we don't have a good # set of tolerances. 
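skip_on_inference_v2 is applied identically across the Inference V2 test modules: the helper checks the accelerator name, and assigning the resulting skipif marker to module-level pytestmark skips every test collected from that file. A sketch of the pattern with a local stand-in for the shared helper:

import pytest

from deepspeed.accelerator import get_accelerator


def skip_on_inference_v2() -> bool:
    # Local stand-in for inference_test_utils.skip_on_inference_v2:
    # the Inference V2 kernels are not supported on HPU, so gate them off there.
    return get_accelerator().device_name() == 'hpu'


# Module-level pytestmark skips every test collected from this file.
pytestmark = pytest.mark.skipif(skip_on_inference_v2(),
                                reason=f'Inference V2 not supported by {get_accelerator().device_name()}.')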
Since this is just on top of BLAS though, the test is more about diff --git a/tests/unit/inference/v2/kernels/core_ops/test_gated_activation.py b/tests/unit/inference/v2/kernels/core_ops/test_gated_activation.py index 8cb95a6cdcba..4232cb7a6324 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_gated_activation.py +++ b/tests/unit/inference/v2/kernels/core_ops/test_gated_activation.py @@ -11,7 +11,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.core_ops import CUDAGatedActivation from deepspeed.inference.v2.inference_utils import ActivationType -from ....v2.inference_test_utils import get_dtypes, allclose +from ....v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_geglu_implementation(input: torch.Tensor, diff --git a/tests/unit/inference/v2/kernels/core_ops/test_post_ln.py b/tests/unit/inference/v2/kernels/core_ops/test_post_ln.py index 0b489894bb9b..0549316081ee 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_post_ln.py +++ b/tests/unit/inference/v2/kernels/core_ops/test_post_ln.py @@ -8,7 +8,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.core_ops import CUDAFPPostLN -from ....v2.inference_test_utils import get_dtypes, allclose +from ....v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(residual: torch.Tensor, hidden_states: torch.Tensor, gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/kernels/core_ops/test_pre_ln.py b/tests/unit/inference/v2/kernels/core_ops/test_pre_ln.py index ffb748e57af2..4da5173d5f53 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_pre_ln.py +++ b/tests/unit/inference/v2/kernels/core_ops/test_pre_ln.py @@ -8,7 +8,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.core_ops import CUDAFPPreLN -from ....v2.inference_test_utils import get_dtypes, allclose +from ....v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(residual: torch.Tensor, hidden_states: torch.Tensor, gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/kernels/core_ops/test_rms_norm.py b/tests/unit/inference/v2/kernels/core_ops/test_rms_norm.py index 63b16da171c9..16357d0f2967 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_rms_norm.py +++ b/tests/unit/inference/v2/kernels/core_ops/test_rms_norm.py @@ -9,7 +9,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.inference_utils import DtypeEnum from deepspeed.inference.v2.kernels.core_ops import CUDARMSNorm, CUDARMSPreNorm -from ....v2.inference_test_utils import get_dtypes, allclose +from ....v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_rms_norm(vals: torch.Tensor, gamma: torch.Tensor, epsilon: float = 1e-5) -> torch.Tensor: diff --git a/tests/unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py 
b/tests/unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py index ed76dabe1f4c..c396cc1268d0 100644 --- a/tests/unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py +++ b/tests/unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py @@ -9,7 +9,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.inference_utils import ActivationType, DtypeEnum from deepspeed.inference.v2.kernels.cutlass_ops import MoEGEMM -from ....v2.inference_test_utils import allclose +from ....v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') SINGLE_EXPERT_CASES = [(13, 2048, 2048), (256, 1024, 4096), (278, 5120, 2048), (893, 5120, 2560)] diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_atom_builder.py b/tests/unit/inference/v2/kernels/ragged_ops/test_atom_builder.py index a33c938a0608..ad88ff2c5d69 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_atom_builder.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_atom_builder.py @@ -6,8 +6,13 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.ragged_ops import AtomBuilder from .ragged_testing_utils import build_complex_batch +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') Q_BLOCK_SIZE = 128 KV_BLOCK_SIZE = 128 diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py index ce5a178c9548..bb0192bbbde6 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py @@ -23,7 +23,10 @@ from deepspeed.ops.op_builder import RaggedUtilsBuilder from .ragged_testing_utils import build_batch_and_manager -from ....v2.inference_test_utils import allclose +from ....v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') try: from flash_attn.flash_attn_interface import flash_attn_varlen_func diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py index 5f1ef930952c..03562e48390c 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py @@ -9,6 +9,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.ragged_ops import LinearBlockedKVCopy from .ragged_testing_utils import build_batch_and_manager, validate_kv_cache +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') @pytest.mark.inference_v2_ops diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py index 156be9929d92..06d67777e65d 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py @@ -12,7 +12,10 @@ from 
deepspeed.inference.v2.kernels.ragged_ops import BlockedRotaryEmbeddings, BlockedTrainedRotaryEmbeddings from deepspeed.inference.v2.ragged import RaggedBatchWrapper, DSSequenceDescriptor from .ragged_testing_utils import build_batch_and_manager, validate_kv_cache -from ....v2.inference_test_utils import allclose +from ....v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') """ NOTE(cmikeh2): It is very possible to see unit test failures (even on FP16) depending on when certain values are casted up to or down from float32. If we are seeing accuracy issues, we should diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_logits_gather.py b/tests/unit/inference/v2/kernels/ragged_ops/test_logits_gather.py index 1feefa9ee588..e00aa85d194c 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_logits_gather.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_logits_gather.py @@ -10,9 +10,12 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.ragged_ops import RaggedLogitsGather -from ....v2.inference_test_utils import allclose, get_dtypes +from ....v2.inference_test_utils import allclose, get_dtypes, skip_on_inference_v2 from .ragged_testing_utils import build_simple_batch +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') + def baseline_implementation(hidden_states: torch.Tensor, seq_lens: List[int]) -> torch.Tensor: output = torch.empty((len(seq_lens), hidden_states.shape[1]), diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_moe_gather.py b/tests/unit/inference/v2/kernels/ragged_ops/test_moe_gather.py index 3907fc3e3a4b..6538a81ec00a 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_moe_gather.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_moe_gather.py @@ -14,6 +14,10 @@ RaggedTopKGating, ) from .ragged_testing_utils import build_simple_batch +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') """ For simplicity's sake, these tests do rely on ``RaggedTopKGating`` and ``MoEScatter`` to produce correct inputs. If either of these kernels is broken diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py b/tests/unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py index aae459f06a6f..9edc014eae33 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py @@ -10,6 +10,10 @@ from deepspeed.inference.v2.inference_utils import DtypeEnum from deepspeed.inference.v2.kernels.ragged_ops import MoEScatter, RaggedTopKGating from .ragged_testing_utils import build_simple_batch +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') """ For simplicity's sake, these tests do rely on ``RaggedTopKGating`` to produce correct inputs. 
If ``RaggedTopKGating`` is broken, these tests will fail, so double check diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py b/tests/unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py index f179f62a9b12..32d7d312a4cf 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py @@ -10,9 +10,12 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.ragged_ops import RaggedEmbeddingKernel -from ....v2.inference_test_utils import allclose, get_dtypes +from ....v2.inference_test_utils import allclose, get_dtypes, skip_on_inference_v2 from .ragged_testing_utils import build_batch_and_manager +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') + def baseline_implementation(token_ids: torch.Tensor, embedding_table: torch.Tensor, diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_top_k_gating.py b/tests/unit/inference/v2/kernels/ragged_ops/test_top_k_gating.py index 5fa0c8a079f0..178512351c0f 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_top_k_gating.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_top_k_gating.py @@ -11,7 +11,10 @@ from deepspeed.inference.v2.inference_utils import DtypeEnum from deepspeed.inference.v2.kernels.ragged_ops import RaggedTopKGating from .ragged_testing_utils import build_simple_batch -from ...inference_test_utils import allclose +from ....v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def _top_k_gating_testing_helper(n_tokens: int, n_experts: int, n_top_k: int, seed: int = 0xC0FFEE) -> None: diff --git a/tests/unit/inference/v2/model_implementations/parameters/test_contiguify.py b/tests/unit/inference/v2/model_implementations/parameters/test_contiguify.py index 52ff0e134dfc..901d9d9b43e7 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_contiguify.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_contiguify.py @@ -15,6 +15,10 @@ ) from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer from .utils import SimpleParam, DummyInferenceModel +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') class TransformerLayerContainer(LayerContainer): diff --git a/tests/unit/inference/v2/model_implementations/parameters/test_layer_inheritance.py b/tests/unit/inference/v2/model_implementations/parameters/test_layer_inheritance.py index 07ad87e6168d..c457227d5499 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_layer_inheritance.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_layer_inheritance.py @@ -6,10 +6,15 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.inference_parameter import InferenceParameter from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer from .utils import SimpleParam, DummyInferenceModel +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by 
{get_accelerator().device_name()}.') class ParentLayer(LayerContainer): diff --git a/tests/unit/inference/v2/model_implementations/parameters/test_mapping.py b/tests/unit/inference/v2/model_implementations/parameters/test_mapping.py index 52313cb6f202..0701b8dcc4d8 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_mapping.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_mapping.py @@ -6,10 +6,15 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.allocator import on_device from deepspeed.inference.v2.inference_parameter import InferenceParameter from deepspeed.inference.v2.model_implementations.parameter_base import ParameterBase, ParamList from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') class MultiDependencyContainer(ParameterBase): diff --git a/tests/unit/inference/v2/model_implementations/parameters/test_multi_parameter_layer.py b/tests/unit/inference/v2/model_implementations/parameters/test_multi_parameter_layer.py index b319bf6de4ad..e7ba08b3c2a8 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_multi_parameter_layer.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_multi_parameter_layer.py @@ -6,10 +6,15 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.inference_parameter import InferenceParameter from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer from .utils import validate_device, SimpleParam, ListParam, DummyInferenceModel +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') class MultiParameterLayer(LayerContainer): diff --git a/tests/unit/inference/v2/model_implementations/parameters/test_parameter_list.py b/tests/unit/inference/v2/model_implementations/parameters/test_parameter_list.py index 06ff9047d648..5f39d3251ea9 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_parameter_list.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_parameter_list.py @@ -6,6 +6,7 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.allocator import on_device from deepspeed.inference.v2.inference_parameter import InferenceParameter from deepspeed.inference.v2.model_implementations.parameter_base import ParameterBase, ParamList @@ -13,6 +14,10 @@ from deepspeed.inference.v2.model_implementations.common_parameters import * from .utils import validate_device +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') class SimpleMoELayer(LayerContainer): diff --git a/tests/unit/inference/v2/model_implementations/sharding/test_attn_out_sharding.py b/tests/unit/inference/v2/model_implementations/sharding/test_attn_out_sharding.py index 850c4c24fde6..fb7901dbf938 100644 --- a/tests/unit/inference/v2/model_implementations/sharding/test_attn_out_sharding.py +++ 
b/tests/unit/inference/v2/model_implementations/sharding/test_attn_out_sharding.py @@ -8,6 +8,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.model_implementations.sharding import * +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') # None of the logic should be dependent on head size. HEAD_SIZE = 64 diff --git a/tests/unit/inference/v2/model_implementations/sharding/test_mlp_sharding.py b/tests/unit/inference/v2/model_implementations/sharding/test_mlp_sharding.py index aac7e5391d8f..553d604d30ee 100644 --- a/tests/unit/inference/v2/model_implementations/sharding/test_mlp_sharding.py +++ b/tests/unit/inference/v2/model_implementations/sharding/test_mlp_sharding.py @@ -8,6 +8,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.model_implementations.sharding import * +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def round_up_to_256(x: int) -> int: diff --git a/tests/unit/inference/v2/model_implementations/sharding/test_qkv_sharding.py b/tests/unit/inference/v2/model_implementations/sharding/test_qkv_sharding.py index 9a1cb9c09c64..86575d2176ad 100644 --- a/tests/unit/inference/v2/model_implementations/sharding/test_qkv_sharding.py +++ b/tests/unit/inference/v2/model_implementations/sharding/test_qkv_sharding.py @@ -10,6 +10,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.model_implementations.sharding import * +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def fill_with_head_ids(head_size: int, n_heads_q: int, n_heads_kv: Optional[int] = None) -> torch.Tensor: diff --git a/tests/unit/inference/v2/modules/test_blas_linear_module.py b/tests/unit/inference/v2/modules/test_blas_linear_module.py index f4d0b1991238..b50819875699 100644 --- a/tests/unit/inference/v2/modules/test_blas_linear_module.py +++ b/tests/unit/inference/v2/modules/test_blas_linear_module.py @@ -13,7 +13,10 @@ from deepspeed.inference.v2.modules import ConfigBundle from deepspeed.inference.v2.modules.configs import DSLinearConfig from deepspeed.inference.v2.modules.interfaces import DSLinearRegistry -from ...v2.inference_test_utils import allclose +from ...v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(hidden_states: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor], diff --git a/tests/unit/inference/v2/modules/test_blocked_attn.py b/tests/unit/inference/v2/modules/test_blocked_attn.py index 6556aa460a44..b55909a69caf 100644 --- a/tests/unit/inference/v2/modules/test_blocked_attn.py +++ b/tests/unit/inference/v2/modules/test_blocked_attn.py @@ -16,7 +16,10 @@ from deepspeed.inference.v2.modules.interfaces import DSSelfAttentionRegistry, DSSelfAttentionBase from ..kernels.ragged_ops.ragged_testing_utils import build_batch_and_manager -from ...v2.inference_test_utils import allclose +from ...v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = 
pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') try: from flash_attn.flash_attn_interface import flash_attn_varlen_func diff --git a/tests/unit/inference/v2/modules/test_cuda_pre_ln_module.py b/tests/unit/inference/v2/modules/test_cuda_pre_ln_module.py index 386f3b3ef0b3..dc971f83f0c2 100644 --- a/tests/unit/inference/v2/modules/test_cuda_pre_ln_module.py +++ b/tests/unit/inference/v2/modules/test_cuda_pre_ln_module.py @@ -12,7 +12,10 @@ from deepspeed.inference.v2.modules import ConfigBundle from deepspeed.inference.v2.modules.configs import DSNormConfig from deepspeed.inference.v2.modules.interfaces import DSPreNormRegistry -from ...v2.inference_test_utils import get_dtypes, allclose +from ...v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(residual: torch.Tensor, hidden_states: Optional[torch.Tensor], gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/modules/test_custom_module.py b/tests/unit/inference/v2/modules/test_custom_module.py index eb54b7a913f2..b813b715ec1e 100644 --- a/tests/unit/inference/v2/modules/test_custom_module.py +++ b/tests/unit/inference/v2/modules/test_custom_module.py @@ -11,7 +11,10 @@ from deepspeed.inference.v2.modules.interfaces import DSPostNormRegistry from deepspeed.inference.v2.modules.configs import DSNormConfig from deepspeed.inference.v2.modules.implementations import cuda_post_ln -from ...v2.inference_test_utils import allclose +from ...v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(residual: torch.Tensor, hidden_states: torch.Tensor, gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/modules/test_cutlass_moe.py b/tests/unit/inference/v2/modules/test_cutlass_moe.py index b14ba127c6be..27ba70b88728 100644 --- a/tests/unit/inference/v2/modules/test_cutlass_moe.py +++ b/tests/unit/inference/v2/modules/test_cutlass_moe.py @@ -15,7 +15,10 @@ from deepspeed.inference.v2.modules.interfaces import DSMoERegistry from ..kernels.ragged_ops.ragged_testing_utils import build_simple_batch -from ...v2.inference_test_utils import allclose, get_dtypes +from ...v2.inference_test_utils import allclose, get_dtypes, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def _gating_reference(logits: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: diff --git a/tests/unit/inference/v2/modules/test_post_ln_module.py b/tests/unit/inference/v2/modules/test_post_ln_module.py index f9dcfd272170..538eb32c3c85 100644 --- a/tests/unit/inference/v2/modules/test_post_ln_module.py +++ b/tests/unit/inference/v2/modules/test_post_ln_module.py @@ -10,7 +10,10 @@ from deepspeed.inference.v2.modules import ConfigBundle from deepspeed.inference.v2.modules.configs import DSNormConfig from deepspeed.inference.v2.modules.interfaces import DSPostNormRegistry -from ...v2.inference_test_utils import get_dtypes, allclose +from ...v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def 
reference_implementation(residual: torch.Tensor, hidden_states: torch.Tensor, gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/modules/test_pre_rms_module.py b/tests/unit/inference/v2/modules/test_pre_rms_module.py index bbd108a35a5a..58bf7761bafa 100644 --- a/tests/unit/inference/v2/modules/test_pre_rms_module.py +++ b/tests/unit/inference/v2/modules/test_pre_rms_module.py @@ -12,7 +12,10 @@ from deepspeed.inference.v2.modules import ConfigBundle from deepspeed.inference.v2.modules.configs import DSNormConfig from deepspeed.inference.v2.modules.interfaces import DSPreNormRegistry -from ...v2.inference_test_utils import get_dtypes, allclose +from ...v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(residual: torch.Tensor, hidden_states: Optional[torch.Tensor], gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/ragged/test_blocked_allocator.py b/tests/unit/inference/v2/ragged/test_blocked_allocator.py index 4596e81c5652..6dddeff6ee9f 100644 --- a/tests/unit/inference/v2/ragged/test_blocked_allocator.py +++ b/tests/unit/inference/v2/ragged/test_blocked_allocator.py @@ -9,7 +9,12 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.ragged.blocked_allocator import BlockedAllocator +from ...v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') @pytest.mark.inference_v2 diff --git a/tests/unit/inference/v2/ragged/test_manager_configs.py b/tests/unit/inference/v2/ragged/test_manager_configs.py index a5f270cced8c..c2dec7673308 100644 --- a/tests/unit/inference/v2/ragged/test_manager_configs.py +++ b/tests/unit/inference/v2/ragged/test_manager_configs.py @@ -7,7 +7,12 @@ from deepspeed.pydantic_v1 import ValidationError +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.ragged import DSStateManagerConfig +from ...v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') @pytest.mark.inference_v2 diff --git a/tests/unit/inference/v2/ragged/test_ragged_wrapper.py b/tests/unit/inference/v2/ragged/test_ragged_wrapper.py index 3cb74f4c49d2..669368f9ef9d 100644 --- a/tests/unit/inference/v2/ragged/test_ragged_wrapper.py +++ b/tests/unit/inference/v2/ragged/test_ragged_wrapper.py @@ -14,6 +14,10 @@ RaggedBatchWrapper, DSStateManagerConfig, ) +from ...v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') @pytest.mark.inference_v2 diff --git a/tests/unit/megatron_model.py b/tests/unit/megatron_model.py index 011ebaf4d3b9..58d5a1308d95 100644 --- a/tests/unit/megatron_model.py +++ b/tests/unit/megatron_model.py @@ -22,7 +22,7 @@ def get_megatron_version(): def get_gpt2_model(args_others, mp_size=1): - from megatron.model import GPT2Model + from megatron.model import GPT2Model # noqa: F401 from megatron.initialize import initialize_megatron args_defaults = { diff --git a/tests/unit/moe/test_moe.py b/tests/unit/moe/test_moe.py index 310a0df16381..9e3c9634eaf7 100644 --- a/tests/unit/moe/test_moe.py +++ b/tests/unit/moe/test_moe.py 
@@ -11,6 +11,7 @@ from unit.simple_model import SimplePRMoEModel, SimpleMoEModel, sequence_dataloader from deepspeed.moe.utils import split_params_into_different_moe_groups_for_optimizer, is_moe_param from deepspeed.runtime.utils import required_torch_version +from deepspeed.accelerator import get_accelerator @pytest.mark.parametrize("ep_size", [2, 4]) @@ -34,10 +35,13 @@ def test(self, ep_size, zero_stage, use_residual): } } hidden_dim = 16 + dtype = torch.half # E+D -- ep_size = 2 # E only -- ep_size = 4 model = SimpleMoEModel(hidden_dim, ep_size=ep_size, use_residual=use_residual) + #TODO SW-179530: remove workaround when issue with lazy mode is resolved (see SW-179530). + model.to(get_accelerator().device_name()) param_group = {'params': [p for p in model.parameters()], 'name': 'random-unique-name'} params = split_params_into_different_moe_groups_for_optimizer(param_group) optimizer = torch.optim.AdamW(params=params) @@ -47,7 +51,11 @@ def test(self, ep_size, zero_stage, use_residual): dist_init_required=False) #dist_init_required=False -- parameterize to True/False? - data_loader = sequence_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) + data_loader = sequence_dataloader(model=model, + total_samples=50, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) def strict_average_tensor(tensor): process_group = optimizer.dp_process_group @@ -116,6 +124,7 @@ def test(self, ep_size, use_residual): config_dict = {"train_batch_size": 8, "steps_per_print": 1, "fp16": {"enabled": True}} hidden_dim = 16 + dtype = torch.half # E+D -- ep_size = 2 # E only -- ep_size = 4 @@ -126,7 +135,11 @@ def test(self, ep_size, use_residual): optimizer=optimizer, dist_init_required=False) - data_loader = sequence_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) + data_loader = sequence_dataloader(model=model, + total_samples=50, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) diff --git a/tests/unit/ops/adam/test_adamw.py b/tests/unit/ops/adam/test_adamw.py index 3b1b088766a5..0b4aad451561 100644 --- a/tests/unit/ops/adam/test_adamw.py +++ b/tests/unit/ops/adam/test_adamw.py @@ -6,12 +6,12 @@ import deepspeed import torch import pytest - from deepspeed.ops.adam import FusedAdam from deepspeed.ops.adam import DeepSpeedCPUAdam from unit.common import DistributedTest from unit.simple_model import SimpleModel from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import FusedAdamBuilder if torch.half not in get_accelerator().supported_dtypes(): pytest.skip(f"fp16 not supported, valid dtype: {get_accelerator().supported_dtypes()}", allow_module_level=True) @@ -67,6 +67,9 @@ def test(self, "cpu_offload": zero_offload } } + if (resulting_optimizer[0] == FusedAdam) and (not deepspeed.ops.__compatible_ops__[FusedAdamBuilder.NAME]): + pytest.skip("FusedAdam is not compatible") + model = SimpleModel(10) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, diff --git a/tests/unit/ops/adam/test_cpu_adam.py b/tests/unit/ops/adam/test_cpu_adam.py index 9a6ff6689446..785cf786acc3 100644 --- a/tests/unit/ops/adam/test_cpu_adam.py +++ b/tests/unit/ops/adam/test_cpu_adam.py @@ -11,7 +11,7 @@ import deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.adam import FusedAdam -from deepspeed.ops.op_builder import CPUAdamBuilder +from deepspeed.ops.op_builder import CPUAdamBuilder, 
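The MoE test changes move the raw module onto the accelerator before wrapping it and thread an explicit dtype through sequence_dataloader so the synthetic batches match the engine precision. A rough sketch of that flow; it is meant to run under the distributed test harness (world size >= ep_size), uses an illustrative config, and omits the MoE optimizer param-group split the real test performs:

import torch
import deepspeed
from deepspeed.accelerator import get_accelerator
from unit.simple_model import SimpleMoEModel, sequence_dataloader

# Illustrative config; the real test parametrizes zero_stage and fp16 settings.
config = {"train_micro_batch_size_per_gpu": 1, "steps_per_print": 1, "fp16": {"enabled": True}}
hidden_dim = 16
dtype = torch.half

model = SimpleMoEModel(hidden_dim, ep_size=2, use_residual=False)
# Lazy-mode workaround from the hunk above: place the raw module on the accelerator
# before handing it to deepspeed.initialize.
model.to(get_accelerator().device_name())

model, _, _, _ = deepspeed.initialize(config=config,
                                      model=model,
                                      model_parameters=model.parameters())

# sequence_dataloader now takes an explicit dtype so the synthetic batches match
# the engine's precision.
data_loader = sequence_dataloader(model=model,
                                  total_samples=50,
                                  hidden_dim=hidden_dim,
                                  device=model.device,
                                  dtype=dtype)
for batch in data_loader:
    loss = model(batch[0], batch[1])
    model.backward(loss)
    model.step()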
FusedAdamBuilder from unit.common import DistributedTest if not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: @@ -62,6 +62,8 @@ class TestCPUAdam(DistributedTest): set_dist_env = False @pytest.mark.skipif(not get_accelerator().is_available(), reason="only supported in CUDA environments.") + @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedAdamBuilder.NAME], + reason="FusedAdam is not compatible") def test_fused_adam_equal(self, dtype, model_size): if ("amd" in pytest.cpu_vendor) and (dtype == torch.half): pytest.skip("cpu-adam with half precision not supported on AMD CPUs") diff --git a/tests/unit/ops/adam/test_hybrid_adam.py b/tests/unit/ops/adam/test_hybrid_adam.py index c7ef4890b322..9003e02588c1 100644 --- a/tests/unit/ops/adam/test_hybrid_adam.py +++ b/tests/unit/ops/adam/test_hybrid_adam.py @@ -12,7 +12,7 @@ import deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.adam import FusedAdam, DeepSpeedCPUAdam -from deepspeed.ops.op_builder import CPUAdamBuilder +from deepspeed.ops.op_builder import CPUAdamBuilder, FusedAdamBuilder from unit.common import DistributedTest if not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: @@ -43,6 +43,8 @@ class TestHybridAdam(DistributedTest): set_dist_env = False @pytest.mark.skipif(not get_accelerator().is_available(), reason="only supported in CUDA environments.") + @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedAdamBuilder.NAME], + reason="FusedAdam is not compatible") def test_hybrid_adam_equal(self, dtype, model_size): if ("amd" in pytest.cpu_vendor) and (dtype == torch.half): pytest.skip("cpu-adam with half precision not supported on AMD CPUs") diff --git a/tests/unit/ops/aio/test_aio.py b/tests/unit/ops/aio/test_aio.py old mode 100644 new mode 100755 diff --git a/tests/unit/ops/transformer/inference/inference_test_utils.py b/tests/unit/ops/transformer/inference/inference_test_utils.py index 9c7b428c0e68..9af48e12f4fb 100644 --- a/tests/unit/ops/transformer/inference/inference_test_utils.py +++ b/tests/unit/ops/transformer/inference/inference_test_utils.py @@ -26,8 +26,10 @@ def get_tolerances(): def get_dtypes(): global DTYPES if DTYPES is None: - DTYPES = [torch.float16, torch.float32] + DTYPES = [torch.float32] try: + if get_accelerator().is_fp16_supported(): + DTYPES.append(torch.float16) if get_accelerator().is_bf16_supported(): DTYPES.append(torch.bfloat16) except (AssertionError, AttributeError): diff --git a/tests/unit/ops/transformer/inference/test_bias_add.py b/tests/unit/ops/transformer/inference/test_bias_add.py index 843c9b889c2b..f25bbc1be692 100644 --- a/tests/unit/ops/transformer/inference/test_bias_add.py +++ b/tests/unit/ops/transformer/inference/test_bias_add.py @@ -8,12 +8,13 @@ import deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer import DeepSpeedInferenceConfig +from deepspeed.ops.transformer.inference.op_binding.bias_add import BiasAddOp from .inference_test_utils import allclose, get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None torch_minor_version = None @@ -22,15 +23,8 @@ def run_bias_add_reference(activations, bias): def run_bias_add_ds(activations, bias): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - if activations.dtype == torch.float16: - return 
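The FusedAdam guards come in two flavors: a collection-time skipif on the builder's compatibility flag, and a run-time skip for only those parameter combinations that resolve to FusedAdam. Both names below (fused_adam_required, maybe_skip_fused_adam) are hypothetical; the compatibility lookup itself is the one used in the patch:

import pytest
import deepspeed
from deepspeed.ops.adam import FusedAdam
from deepspeed.ops.op_builder import FusedAdamBuilder

# Collection-time form: skip the whole test when the fused kernel cannot be built.
fused_adam_required = pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedAdamBuilder.NAME],
                                         reason="FusedAdam is not compatible")


def maybe_skip_fused_adam(resolved_optimizer):
    # Run-time form: skip only the parameter combinations that resolve to FusedAdam.
    if resolved_optimizer is FusedAdam and not deepspeed.ops.__compatible_ops__[FusedAdamBuilder.NAME]:
        pytest.skip("FusedAdam is not compatible")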
inference_module.bias_add_fp16(activations, bias) - elif activations.dtype == torch.bfloat16: - return inference_module.bias_add_bf16(activations, bias) - else: - return inference_module.bias_add_fp32(activations, bias) + config = DeepSpeedInferenceConfig(dtype=activations.dtype) + return BiasAddOp(config)(activations, bias) @pytest.mark.inference_ops diff --git a/tests/unit/ops/transformer/inference/test_bias_geglu.py b/tests/unit/ops/transformer/inference/test_bias_geglu.py index d5ab13964974..05de4fbb4cf8 100644 --- a/tests/unit/ops/transformer/inference/test_bias_geglu.py +++ b/tests/unit/ops/transformer/inference/test_bias_geglu.py @@ -8,13 +8,13 @@ import deepspeed from deepspeed.ops.op_builder import InferenceBuilder from deepspeed.accelerator import get_accelerator +from deepspeed.ops.transformer.inference.op_binding.gated_activation import GatedActivationOp from deepspeed.utils.types import ActivationFuncType from .inference_test_utils import allclose, get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None torch_minor_version = None @@ -27,10 +27,7 @@ def run_bias_geglu_reference(activations, bias): def run_bias_geglu_ds(activation, bias): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module.gated_activation(activation, bias, ActivationFuncType.GATED_GELU) + return GatedActivationOp()(activation, bias, ActivationFuncType.GATED_GELU) @pytest.mark.inference_ops @@ -56,17 +53,14 @@ def run_gated_silu_reference(activations, bias): def run_gated_silu_ds(activation, bias): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module.gated_activation(activation, bias, ActivationFuncType.GATED_SILU) + return GatedActivationOp()(activation, bias, ActivationFuncType.GATED_SILU) @pytest.mark.inference_ops @pytest.mark.parametrize("batch", [1, 2]) @pytest.mark.parametrize("sequence", [1, 128, 255]) @pytest.mark.parametrize("channels", [512, 1232, 4096]) -@pytest.mark.parametrize("dtype", [torch.float16, torch.float32]) +@pytest.mark.parametrize("dtype", get_dtypes()) def test_gated_silu(batch, sequence, channels, dtype): activation = torch.randn((batch, sequence, channels * 2), dtype=dtype, device=get_accelerator().device_name()) bias = torch.randn((channels * 2), dtype=dtype, device=get_accelerator().device_name()) diff --git a/tests/unit/ops/transformer/inference/test_bias_gelu.py b/tests/unit/ops/transformer/inference/test_bias_gelu.py index fd82da51380c..b69030e87ace 100644 --- a/tests/unit/ops/transformer/inference/test_bias_gelu.py +++ b/tests/unit/ops/transformer/inference/test_bias_gelu.py @@ -8,13 +8,14 @@ import deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer import DeepSpeedInferenceConfig +from deepspeed.ops.transformer.inference.op_binding.bias_gelu import BiasGeluOp from .inference_test_utils import allclose, get_dtypes from packaging import version as pkg_version if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None torch_minor_version = None @@ -25,15 +26,8 @@ def run_bias_gelu_reference(activations, bias): def run_bias_gelu_ds(activations, bias): - global inference_module - if 
inference_module is None: - inference_module = InferenceBuilder().load() - if activations.dtype == torch.float16: - return inference_module.bias_gelu_fp16(activations, bias) - elif activations.dtype == torch.bfloat16: - return inference_module.bias_gelu_bf16(activations, bias) - else: - return inference_module.bias_gelu_fp32(activations, bias) + config = DeepSpeedInferenceConfig(dtype=activations.dtype) + return BiasGeluOp(config)(activations, bias) @pytest.mark.inference_ops diff --git a/tests/unit/ops/transformer/inference/test_bias_relu.py b/tests/unit/ops/transformer/inference/test_bias_relu.py index 881af78e92cf..57134665b241 100644 --- a/tests/unit/ops/transformer/inference/test_bias_relu.py +++ b/tests/unit/ops/transformer/inference/test_bias_relu.py @@ -8,12 +8,13 @@ import deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer import DeepSpeedInferenceConfig +from deepspeed.ops.transformer.inference.op_binding.bias_relu import BiasReluOp from .inference_test_utils import allclose, get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None torch_minor_version = None @@ -23,15 +24,8 @@ def run_bias_relu_reference(activations, bias): def run_bias_relu_ds(activations, bias): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - if activations.dtype == torch.float16: - return inference_module.bias_relu_fp16(activations, bias) - elif activations.dtype == torch.bfloat16: - return inference_module.bias_relu_bf16(activations, bias) - else: - return inference_module.bias_relu_fp32(activations, bias) + config = DeepSpeedInferenceConfig(dtype=activations.dtype) + return BiasReluOp(config)(activations, bias) @pytest.mark.inference_ops diff --git a/tests/unit/ops/transformer/inference/test_gelu.py b/tests/unit/ops/transformer/inference/test_gelu.py index de924848bfb4..beb74d09ab30 100644 --- a/tests/unit/ops/transformer/inference/test_gelu.py +++ b/tests/unit/ops/transformer/inference/test_gelu.py @@ -7,11 +7,12 @@ import torch import deepspeed from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer import DeepSpeedInferenceConfig +from deepspeed.ops.transformer.inference.op_binding.bias_gelu import BiasGeluOp if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None torch_minor_version = None @@ -42,15 +43,11 @@ def run_gelu_ds(activations, use_triton_ops=False): from deepspeed.ops.transformer.inference.triton import gelu return gelu(activations) + device = deepspeed.accelerator.get_accelerator().device_name() channels = activations.shape[-1] - bias = torch.zeros((channels), dtype=activations.dtype, device='cuda') - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - if activations.dtype == torch.float16: - return inference_module.bias_gelu_fp16(activations, bias) - else: - return inference_module.bias_gelu_fp32(activations, bias) + bias = torch.zeros((channels), dtype=activations.dtype, device=device) + config = DeepSpeedInferenceConfig(dtype=activations.dtype) + return BiasGeluOp(config)(activations, bias) @pytest.mark.inference_ops @@ -60,7 +57,8 @@ def run_gelu_ds(activations, use_triton_ops=False): 
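The op-binding refactor replaces cached InferenceBuilder().load() handles and per-dtype entry points (bias_gelu_fp16/bf16/fp32 and friends) with thin callable bindings that read the dtype from a DeepSpeedInferenceConfig. A sketch of the new call shape, assuming the inference ops are compatible on the system (the tests skip otherwise) and using illustrative tensor shapes:

import torch
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.transformer import DeepSpeedInferenceConfig
from deepspeed.ops.transformer.inference.op_binding.bias_gelu import BiasGeluOp

device = get_accelerator().device_name()
activations = torch.randn((1, 128, 512), dtype=torch.float16, device=device)
bias = torch.randn((512, ), dtype=torch.float16, device=device)

# The binding reads the dtype from the config and dispatches internally, replacing
# the old bias_gelu_fp16 / bias_gelu_bf16 / bias_gelu_fp32 entry points.
config = DeepSpeedInferenceConfig(dtype=activations.dtype)
out = BiasGeluOp(config)(activations, bias)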
@pytest.mark.parametrize("dtype", [torch.float16]) @pytest.mark.parametrize("use_triton_ops", [True, False]) def test_gelu(batch, sequence, channels, dtype, use_triton_ops): - activations_ds = torch.randn((batch, sequence, channels), dtype=dtype, device='cuda') + device = deepspeed.accelerator.get_accelerator().device_name() + activations_ds = torch.randn((batch, sequence, channels), dtype=dtype, device=device) activations_ref = activations_ds.clone().detach() if not deepspeed.HAS_TRITON and use_triton_ops: diff --git a/tests/unit/ops/transformer/inference/test_layer_norm.py b/tests/unit/ops/transformer/inference/test_layer_norm.py index 711a35213015..9c2a25b3bbc0 100644 --- a/tests/unit/ops/transformer/inference/test_layer_norm.py +++ b/tests/unit/ops/transformer/inference/test_layer_norm.py @@ -8,6 +8,7 @@ import pytest from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer.inference.op_binding.layer_norm import LayerNormOp from .inference_test_utils import allclose, get_dtypes, assert_almost_equal try: import triton # noqa: F401 # type: ignore @@ -21,8 +22,6 @@ if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None - def ref_implementation(vals, gamma, beta, epsilon, channels, dtype): vals_f = vals.to(torch.float32) @@ -32,10 +31,7 @@ def ref_implementation(vals, gamma, beta, epsilon, channels, dtype): def ds_implementation(vals, gamma, beta, epsilon): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module.layer_norm(vals, gamma, beta, epsilon) + return LayerNormOp()(vals, gamma, beta, epsilon) def ds_triton_implementation(vals, gamma, beta, epsilon): @@ -83,10 +79,7 @@ def residual_ref_implementation(vals, bias, res, gamma, beta, epsilon, channels, def residual_ds_implementation(vals, bias, res, gamma, beta, epsilon): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module._layer_norm_residual(vals, bias, res, gamma, beta, epsilon) + return LayerNormOp.layer_norm_residual(vals, bias, res, gamma, beta, epsilon) def residual_ds_triton_implementation(vals, bias, res, gamma, beta, epsilon): @@ -137,10 +130,7 @@ def residual_store_ref_implementation(vals, bias, res, gamma, beta, epsilon, cha def residual_store_ds_implementation(vals, bias, res, gamma, beta, epsilon): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module.layer_norm_residual_store_pre_ln_res(vals, bias, res, gamma, beta, epsilon) + return LayerNormOp.layer_norm_residual_store_pre_ln_res(vals, bias, res, gamma, beta, epsilon) @pytest.mark.inference_ops @@ -175,6 +165,7 @@ def test_layer_norm_residual_store_pre_ln_res(batch, seq_len, channels, dtype): def test_triton_layer_norm(M, N, dtype, residual, input_bias, eps=1e-5, device='cuda'): if not deepspeed.HAS_TRITON: pytest.skip("triton has to be installed for the test") + torch.manual_seed(0) # create data x_shape = (M, N) diff --git a/tests/unit/ops/transformer/inference/test_moe_res_matmult.py b/tests/unit/ops/transformer/inference/test_moe_res_matmult.py index e1c8127a83ac..dcf9f16baaf1 100644 --- a/tests/unit/ops/transformer/inference/test_moe_res_matmult.py +++ b/tests/unit/ops/transformer/inference/test_moe_res_matmult.py @@ -8,24 +8,20 @@ import 
deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer.inference.op_binding.moe_res_matmul import MoEResMatmulOp from .inference_test_utils import allclose, get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None - def run_moe_res_matmul_reference(residual, coef1, coef2, output): return residual * coef1 + output * coef2 def run_moe_res_matmul_ds(residual, coef, output): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() coef_t = coef.transpose(-1, -2).contiguous() - return inference_module.moe_res_matmul(residual, coef_t, output) + return MoEResMatmulOp()(residual, coef_t, output) @pytest.mark.inference_ops diff --git a/tests/unit/ops/transformer/inference/test_residual_add.py b/tests/unit/ops/transformer/inference/test_residual_add.py index c2952f74ff2d..807da4904341 100644 --- a/tests/unit/ops/transformer/inference/test_residual_add.py +++ b/tests/unit/ops/transformer/inference/test_residual_add.py @@ -8,6 +8,8 @@ import deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer import DeepSpeedInferenceConfig +from deepspeed.ops.transformer.inference.op_binding import ResidualAddOp from .inference_test_utils import get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: @@ -36,11 +38,6 @@ def allclose(x, y): return torch.allclose(x, y, rtol=rtol, atol=atol) -@pytest.fixture(scope="module") -def inference_module(): - return InferenceBuilder().load() - - def res_add_bias_ref(hidden_state, residual, attn_output, attn_bias, final_bias, mp_size=1, pre_attn_norm=True): if pre_attn_norm: hidden_state += (residual + final_bias + attn_output + attn_bias) / mp_size @@ -75,9 +72,9 @@ def run_residual_add_reference(hidden_state, residual, attn_output, attn_bias, f @pytest.mark.parametrize("mp_size", [1, 2]) @pytest.mark.parametrize("pre_attn_norm", [True, False]) @pytest.mark.parametrize("use_triton_ops", [True, False]) -def test_residual_add(inference_module, batch, sequence, hidden_dim, dtype, mlp_after_attn, add_bias, mp_size, - pre_attn_norm, use_triton_ops): - if not deepspeed.HAS_TRITON and use_triton_ops and dtype == torch.float16: +def test_residual_add(batch, sequence, hidden_dim, dtype, mlp_after_attn, add_bias, mp_size, pre_attn_norm, + use_triton_ops): + if not deepspeed.HAS_TRITON and use_triton_ops: pytest.skip("triton has to be installed for the test") ds_out = torch.randn((batch, sequence, hidden_dim), dtype=dtype, device=get_accelerator().device_name()) residual = torch.randn((batch, sequence, hidden_dim), dtype=dtype, device=get_accelerator().device_name()) @@ -96,19 +93,9 @@ def test_residual_add(inference_module, batch, sequence, hidden_dim, dtype, mlp_ if use_triton_ops: from deepspeed.ops.transformer.inference.triton import residual_add_bias ds_out = residual_add_bias(*res_add_args) - if dtype == torch.float16: - ds_out = inference_module.residual_add_bias_fp16(*res_add_args) - elif dtype == torch.float32: - ds_out = inference_module.residual_add_bias_fp32(*res_add_args) - elif dtype == torch.bfloat16: - ds_out = inference_module.residual_add_bias_bf16(*res_add_args) else: - if dtype == torch.float16: - ds_out = inference_module.residual_add_bias_fp16(*res_add_args) - elif dtype == torch.float32: - 
ds_out = inference_module.residual_add_bias_fp32(*res_add_args) - else: - raise ValueError(f"Unsupported dtype: {dtype}") + config = DeepSpeedInferenceConfig(dtype=dtype) + ds_out = ResidualAddOp(config).residual_add_func(*res_add_args) if not allclose(ds_out, ref_out): print((ds_out - ref_out).abs().max()) diff --git a/tests/unit/ops/transformer/inference/test_rms_norm.py b/tests/unit/ops/transformer/inference/test_rms_norm.py index 508a40e12e8d..ed500ec16f9f 100644 --- a/tests/unit/ops/transformer/inference/test_rms_norm.py +++ b/tests/unit/ops/transformer/inference/test_rms_norm.py @@ -8,13 +8,13 @@ import pytest from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder # type: ignore +from deepspeed.ops.transformer.inference.op_binding.pre_rms_norm import PreRMSNormOp +from deepspeed.ops.transformer.inference.op_binding.rms_norm import RMSNormOp from .inference_test_utils import allclose, get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None - def ref_implementation(vals, gamma, epsilon): variance = vals.to(torch.float32).pow(2).mean(-1, keepdim=True) @@ -27,10 +27,7 @@ def ref_implementation(vals, gamma, epsilon): def ds_implementation(vals, gamma, epsilon): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module.rms_norm(vals, gamma, epsilon) + return RMSNormOp()(vals, gamma, epsilon) @pytest.mark.inference_ops @@ -51,10 +48,7 @@ def test_rms_norm(batch, seq_len, channels, dtype): def pre_ds_implementation(vals, residual, gamma, epsilon): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module.pre_rms_norm(vals, residual, gamma, epsilon) + return PreRMSNormOp()(vals, residual, gamma, epsilon) def pre_ref_implementation(vals, residual, gamma, epsilon): @@ -74,7 +68,7 @@ def pre_ref_implementation(vals, residual, gamma, epsilon): @pytest.mark.parametrize("batch", [1, 32]) @pytest.mark.parametrize("seq_len", [1, 128]) @pytest.mark.parametrize("channels", [384, 512, 768, 1024, 2048, 8192, 14432]) -@pytest.mark.parametrize("dtype", [torch.float16, torch.float32]) +@pytest.mark.parametrize("dtype", get_dtypes()) def test_pre_norm(batch, seq_len, channels, dtype): device = get_accelerator().current_device_name() vals = torch.randn((batch, seq_len, channels), dtype=dtype, device=device) diff --git a/tests/unit/ops/transformer/inference/test_softmax.py b/tests/unit/ops/transformer/inference/test_softmax.py index 76046f31e01a..4f6d69160aa7 100644 --- a/tests/unit/ops/transformer/inference/test_softmax.py +++ b/tests/unit/ops/transformer/inference/test_softmax.py @@ -11,7 +11,6 @@ if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None torch_minor_version = None @@ -43,7 +42,9 @@ def run_softmax_ds(input, use_triton_ops=False): def test_softmax(batch, sequence, channels, dtype, use_triton_ops): if not deepspeed.HAS_TRITON and use_triton_ops: pytest.skip("triton has to be installed for the test") - input_ds = torch.randn((batch, sequence, channels), dtype=dtype, device='cuda') + + device = deepspeed.accelerator.get_accelerator().device_name() + input_ds = torch.randn((batch, sequence, channels), dtype=dtype, device=device) input_ref = 
input_ds.clone().detach() ds_out = run_softmax_ds(input_ds, use_triton_ops) diff --git a/tests/unit/pipe/test_pipe_module.py b/tests/unit/pipe/test_pipe_module.py index 05c6a82ef55a..9deb5ff9efd7 100644 --- a/tests/unit/pipe/test_pipe_module.py +++ b/tests/unit/pipe/test_pipe_module.py @@ -71,7 +71,6 @@ def test(self, sequential_model, simple_config, batch_input, activation_checkpoi pipe_model = copy.deepcopy(sequential_model) pipe_model = PipelineModule(layers=pipe_model, num_stages=2) - # Ensure all parameters are accounted for. my_params = sum(p.numel() for p in pipe_model.parameters()) total_pipe_params = torch.LongTensor([my_params]).to(get_accelerator().device_name()) diff --git a/tests/unit/profiling/flops_profiler/test_flops_profiler.py b/tests/unit/profiling/flops_profiler/test_flops_profiler.py index bbcb01b489f4..7f21982760ff 100644 --- a/tests/unit/profiling/flops_profiler/test_flops_profiler.py +++ b/tests/unit/profiling/flops_profiler/test_flops_profiler.py @@ -82,6 +82,8 @@ def test(self): "top_modules": 3, }, } + dtype = torch.half + hidden_dim = 10 model = SimpleModel(hidden_dim, empty_grad=False) @@ -91,7 +93,7 @@ def test(self): total_samples=50, hidden_dim=hidden_dim, device=model.device, - dtype=torch.half) + dtype=dtype) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) diff --git a/tests/unit/runtime/comm/test_coalesced_collectives.py b/tests/unit/runtime/comm/test_coalesced_collectives.py index d9ac79619bd3..6c677cf46482 100644 --- a/tests/unit/runtime/comm/test_coalesced_collectives.py +++ b/tests/unit/runtime/comm/test_coalesced_collectives.py @@ -18,7 +18,8 @@ class TestReduceScatterCoalesced(DistributedTest): world_size = 2 def test_single_input(self): - input = torch.full((6, ), dist.get_rank(), dtype=torch.half, device=get_accelerator().current_device_name()) + dtype = torch.half + input = torch.full((6, ), dist.get_rank(), dtype=dtype, device=get_accelerator().current_device_name()) (output, ) = reduce_scatter_coalesced([input], dist.get_world_group()) @@ -26,7 +27,8 @@ def test_single_input(self): assert torch.allclose(output, torch.full_like(output, 0.5)) def test_two_inputs(self): - tensor_kwargs = {"device": get_accelerator().current_device_name(), "dtype": torch.half} + dtype = torch.half + tensor_kwargs = {"device": get_accelerator().current_device_name(), "dtype": dtype} inputs = [ dist.get_rank() * torch.arange(0, 6, **tensor_kwargs), dist.get_rank() * torch.arange(6, 9, **tensor_kwargs), @@ -50,7 +52,8 @@ class TestReduceScatterCoalescedTensorSmallerThanWorldSize(DistributedTest): world_size = 2 def test(self): - input = torch.zeros((1, ), dtype=torch.half, device=get_accelerator().current_device_name()) + dtype = torch.half + input = torch.zeros((1, ), dtype=dtype, device=get_accelerator().current_device_name()) (output, ) = reduce_scatter_coalesced([input], dist.get_world_group()) diff --git a/tests/unit/runtime/compile/test_compile_wrapper.py b/tests/unit/runtime/compile/test_compile_wrapper.py index 98a7c28c6a28..993a7e4509e9 100644 --- a/tests/unit/runtime/compile/test_compile_wrapper.py +++ b/tests/unit/runtime/compile/test_compile_wrapper.py @@ -31,7 +31,7 @@ def base_config(): }, "compile": { "enabled": True, - "backend": "inductor" + "backend": get_accelerator().get_compile_backend() } } return config_dict diff --git a/tests/unit/runtime/compile/test_compile_zero.py b/tests/unit/runtime/compile/test_compile_zero.py index 910f32db1c96..09fe946218d6 100644 --- 
a/tests/unit/runtime/compile/test_compile_zero.py +++ b/tests/unit/runtime/compile/test_compile_zero.py @@ -8,6 +8,7 @@ from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum from deepspeed.runtime.utils import required_torch_version +from deepspeed.accelerator import get_accelerator from unit.runtime.compile.util import compare_loss from unit.common import DistributedTest @@ -48,7 +49,7 @@ def test_compile_zero(self, tmpdir, zero_stage, dtype, offload_device): }, "compile": { "enabled": True, - "backend": "inductor" + "backend": get_accelerator().get_compile_backend() } } diff --git a/tests/unit/runtime/compile/test_load_config.py b/tests/unit/runtime/compile/test_load_config.py index 5f1c01b86852..1db913ede857 100644 --- a/tests/unit/runtime/compile/test_load_config.py +++ b/tests/unit/runtime/compile/test_load_config.py @@ -29,7 +29,7 @@ def custom_backend(gm: torch.fx.GraphModule, example_inputs): def custom_compiler_fn(module: torch.nn.Module): global custom_compler_fn_called custom_compler_fn_called = True - return torch.compile(module) + return torch.compile(module, backend=get_accelerator().get_compile_backend()) @pytest.fixture @@ -47,7 +47,7 @@ def base_config(): }, "compile": { "enabled": True, - "backend": "inductor" + "backend": get_accelerator().get_compile_backend() } } return config_dict diff --git a/tests/unit/runtime/half_precision/onebit/test_onebit.py b/tests/unit/runtime/half_precision/onebit/test_onebit.py index ba795a853be0..14feb54550f0 100644 --- a/tests/unit/runtime/half_precision/onebit/test_onebit.py +++ b/tests/unit/runtime/half_precision/onebit/test_onebit.py @@ -33,6 +33,9 @@ pytest.skip("NCCL-based 1-bit compression is not yet supported w. ROCm 5 until cupy supports ROCm 5", allow_module_level=True) +if get_accelerator().device_name() == 'hpu': + pytest.skip("1-bit compression is not supported by HPU.", allow_module_level=True) + @pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=["fp32", "fp16"]) class TestOneBitAdamBasic(DistributedTest): diff --git a/tests/unit/runtime/half_precision/test_bf16.py b/tests/unit/runtime/half_precision/test_bf16.py index 3f551fb0fd4a..fd0b4cb0dcaa 100644 --- a/tests/unit/runtime/half_precision/test_bf16.py +++ b/tests/unit/runtime/half_precision/test_bf16.py @@ -10,8 +10,9 @@ from unit.common import DistributedTest from deepspeed.ops.op_builder import CPUAdamBuilder from unit.simple_model import SimpleModel, SimpleOptimizer, random_dataloader -from unit.util import bf16_required_version_check +from unit.util import bf16_required_version_check, hpu_lazy_enabled from deepspeed import comm as dist +from deepspeed.accelerator import get_accelerator class TestAdamBF16ZeroOneCycleCompatibility(DistributedTest): @@ -196,6 +197,9 @@ def test(self, optimizer_constructor, zero_stage=2): hidden_dim = 10 model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + model.to(device) client_optimizer = optimizer_constructor(params=model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=client_optimizer) @@ -274,6 +278,9 @@ def test(self, stage=2): hidden_dim = 10 model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().current_device_name() + model.to(device) optimizer = torch.optim.Adam(model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer) data_loader = random_dataloader(model=model, @@ -298,7 +305,10 @@ def test(self, comp_type, 
comm_type): pytest.skip( " DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly" ) - + if comm_type and (comp_type not in get_accelerator().supported_dtypes() + or comm_type not in get_accelerator().supported_dtypes()): + pytest.skip( + f"comp_type:{comp_type}, comm_type:{comm_type} not supported by {get_accelerator().device_name()}.") type_str = {torch.float16: "fp16", torch.bfloat16: "bfp16"} config_dict = { @@ -321,6 +331,12 @@ def test(self, comp_type, comm_type): hidden_dim = 10 model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + # TODO: remove this when the following is resolved: + # https://jira.habana-labs.com/browse/SW-137450 + config_dict["fp16"]["initial_scale_power"] = 30 + device = get_accelerator().current_device_name() + model.to(device) optimizer = torch.optim.Adam(model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer) data_loader = random_dataloader(model=model, diff --git a/tests/unit/runtime/half_precision/test_dynamic_loss_scale.py b/tests/unit/runtime/half_precision/test_dynamic_loss_scale.py index 2a58fd6b4a57..ddc274e9bc18 100644 --- a/tests/unit/runtime/half_precision/test_dynamic_loss_scale.py +++ b/tests/unit/runtime/half_precision/test_dynamic_loss_scale.py @@ -6,8 +6,14 @@ import torch import deepspeed import numpy as np +import pytest from unit.common import DistributedTest from unit.simple_model import SimpleModel +from deepspeed.ops.op_builder import FusedLambBuilder +from deepspeed.accelerator import get_accelerator + +if torch.half not in get_accelerator().supported_dtypes(): + pytest.skip(f"fp16 not supported, valid dtype: {get_accelerator().supported_dtypes()}", allow_module_level=True) def run_model_step(model, gradient_list): @@ -38,6 +44,7 @@ def test_no_overflow(self): "loss_scale_window": 2 } } + hidden_dim = 1 model = SimpleModel(hidden_dim) model, optim, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) @@ -143,6 +150,7 @@ def test_some_overflow(self): assert optim.cur_iter == expected_iteration +@pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible") class TestUnfused(DistributedTest): world_size = 1 diff --git a/tests/unit/runtime/half_precision/test_fp16.py b/tests/unit/runtime/half_precision/test_fp16.py index 3d5e18b46502..c2fd929ff7e6 100644 --- a/tests/unit/runtime/half_precision/test_fp16.py +++ b/tests/unit/runtime/half_precision/test_fp16.py @@ -12,7 +12,9 @@ from unit.simple_model import SimpleModel, SimpleOptimizer, random_dataloader, SimpleMoEModel, sequence_dataloader from deepspeed.runtime.utils import required_torch_version from deepspeed.accelerator import get_accelerator -from deepspeed.ops.op_builder import CPUAdamBuilder +from deepspeed.ops.op_builder import CPUAdamBuilder, FusedLambBuilder, FusedAdamBuilder +from unit.util import hpu_lazy_enabled +from deepspeed.moe.utils import split_params_into_different_moe_groups_for_optimizer try: from apex import amp # noqa: F401 # type: ignore @@ -21,7 +23,11 @@ _amp_available = False amp_available = pytest.mark.skipif(not _amp_available, reason="apex/amp is not installed") +if torch.half not in get_accelerator().supported_dtypes(): + pytest.skip(f"fp16 not supported, valid dtype: {get_accelerator().supported_dtypes()}", allow_module_level=True) + +@pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not 
compatible") class TestLambFP32GradClip(DistributedTest): world_size = 2 @@ -52,6 +58,7 @@ def test(self): model.step() +@pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible") class TestLambFP16(DistributedTest): world_size = 2 @@ -187,6 +194,8 @@ def mock_unscale_and_clip_grads(total_norm, apply_scale=True): engine.backward(loss) engine.step() + @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedAdamBuilder.NAME], + reason="fused adam is not compatible") def test_fused_gradnorm(self, monkeypatch): if not required_torch_version(min_version=1.8): pytest.skip("DeepSpeed MoE tests need torch 1.8 or higher to run correctly") @@ -203,8 +212,10 @@ def mock_unscale_and_clip_grads(grads_groups_flat, total_norm, apply_scale=True) # initialize MoE model = SimpleMoEModel(hidden_dim, ep_size=2) + param_group = {'params': [p for p in model.parameters()], 'name': 'random-unique-name'} + params = split_params_into_different_moe_groups_for_optimizer(param_group) # optimizer = torch.optim.AdamW(params=model.parameters()) - optimizer = FusedAdam(params=model.parameters()) + optimizer = FusedAdam(params=params) engine, optimizer, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer, @@ -216,6 +227,7 @@ def mock_unscale_and_clip_grads(grads_groups_flat, total_norm, apply_scale=True) engine.backward(loss) engine.step() + @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible") @pytest.mark.parametrize("fused_lamb_legacy", [(False), (True)]) def test_lamb_gradnorm(self, monkeypatch, fused_lamb_legacy: bool): if not required_torch_version(min_version=1.8): @@ -561,8 +573,10 @@ def test(self, zero_stage, optimizer_constructor): } } hidden_dim = 10 - model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + model.to(device) client_optimizer = optimizer_constructor(params=model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=client_optimizer) @@ -690,6 +704,9 @@ def test(self, stage): hidden_dim = 10 model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + model.to(device) optimizer = torch.optim.Adam(model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) diff --git a/tests/unit/runtime/pipe/test_pipe.py b/tests/unit/runtime/pipe/test_pipe.py index 88e26290b650..eee8cf90fb8c 100644 --- a/tests/unit/runtime/pipe/test_pipe.py +++ b/tests/unit/runtime/pipe/test_pipe.py @@ -6,7 +6,6 @@ import copy import torch.nn as nn import pytest - import deepspeed.comm as dist from deepspeed.runtime.pipe.topology import PipeDataParallelTopology from deepspeed.runtime.pipe.module import PipelineModule @@ -67,7 +66,6 @@ def test_pipe_base(self, topo_config): skip_on_arch(min_arch=7) topo = PipeTopo(**topo_config) steps = 100 # must be >=100 - # Allocate model for consistent initial weights. 
init_net = AlexNetPipe() diff --git a/tests/unit/runtime/sparse_tensor/test_averaging_sparse_gradients.py b/tests/unit/runtime/sparse_tensor/test_averaging_sparse_gradients.py index 92da2257bdb0..89977bbf3668 100644 --- a/tests/unit/runtime/sparse_tensor/test_averaging_sparse_gradients.py +++ b/tests/unit/runtime/sparse_tensor/test_averaging_sparse_gradients.py @@ -5,9 +5,14 @@ import torch import deepspeed +import pytest from unit.common import DistributedTest +from deepspeed.accelerator import get_accelerator from unit.util import skip_on_arch +if get_accelerator().device_name() == 'hpu': + pytest.skip("sparse_gradients not supported by HPU.", allow_module_level=True) + class Model(torch.nn.Module): diff --git a/tests/unit/runtime/sparse_tensor/test_sparse_grads.py b/tests/unit/runtime/sparse_tensor/test_sparse_grads.py index 0689adc08670..01558cb8c6a7 100644 --- a/tests/unit/runtime/sparse_tensor/test_sparse_grads.py +++ b/tests/unit/runtime/sparse_tensor/test_sparse_grads.py @@ -5,10 +5,14 @@ import torch import deepspeed +import pytest from unit.common import DistributedTest - +from deepspeed.accelerator import get_accelerator import deepspeed.utils.groups as groups +if get_accelerator().device_name() == 'hpu': + pytest.skip("sparse_gradients not supported by HPU.", allow_module_level=True) + class Model(torch.nn.Module): @@ -43,7 +47,6 @@ class TestSparseAdam(DistributedTest): def test(self): config_dict = {"train_batch_size": 2, "steps_per_print": 1, "sparse_gradients": True} - model = Model() optimizer = Adam(list(model.linear.parameters()), list(model.emb.parameters())) engine, _, _, _ = deepspeed.initialize(model=model, optimizer=optimizer, config=config_dict) diff --git a/tests/unit/runtime/test_autocast.py b/tests/unit/runtime/test_autocast.py index 9176770afda7..9692f105d69d 100644 --- a/tests/unit/runtime/test_autocast.py +++ b/tests/unit/runtime/test_autocast.py @@ -25,6 +25,7 @@ def test_missing_amp_autocast(self, half_op): output = ds_linear(input) assert output.dtype == ds_linear.weight.dtype + @pytest.mark.skipif(get_accelerator().amp() is None, reason='amp is not installed') def test_disable_autocast_linear(self, half_op): amp = get_accelerator().amp() diff --git a/tests/unit/runtime/test_data_efficiency.py b/tests/unit/runtime/test_data_efficiency.py index b9bd9c3aa56e..afd61b618c87 100644 --- a/tests/unit/runtime/test_data_efficiency.py +++ b/tests/unit/runtime/test_data_efficiency.py @@ -103,9 +103,10 @@ def data_post_process(data, data_sampler_state_dict): assert 'dummy_metric' in data_sampler_state_dict['current_difficulties'] return data + dtype = torch.half hidden_dim = 10 model = SimpleModel(hidden_dim) - dataset = random_dataset(20, hidden_dim, torch.device('cpu'), dtype=torch.half) + dataset = random_dataset(20, hidden_dim, torch.device('cpu'), dtype=dtype) model, _, data_loader, _ = deepspeed.initialize(config=config_dict, model=model, training_data=dataset, @@ -159,9 +160,15 @@ def test_fixed_discrete(self): hidden_dim = 10 ground_truths = {1: 1, 2: 1, 3: 2, 4: 2, 5: 3, 6: 3, 7: 4, 8: 4} + dtype = torch.half + model = Curriculum_SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) - data_loader = random_dataloader(model=model, total_samples=20, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=20, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for n, batch in enumerate(data_loader): loss, seqlen = 
model(batch[0], batch[1]) model.backward(loss) @@ -202,10 +209,15 @@ def test_fixed_linear(self): } hidden_dim = 10 ground_truths = {1: 2, 2: 4, 3: 4, 4: 6, 5: 6, 6: 8, 7: 8, 8: 10, 9: 10, 10: 10} + dtype = torch.half model = Curriculum_SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) - data_loader = random_dataloader(model=model, total_samples=20, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=20, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for n, batch in enumerate(data_loader): loss, seqlen = model(batch[0], batch[1]) model.backward(loss) diff --git a/tests/unit/runtime/test_ds_config_dict.py b/tests/unit/runtime/test_ds_config_dict.py index 880282bb7e57..fe525fd0b495 100644 --- a/tests/unit/runtime/test_ds_config_dict.py +++ b/tests/unit/runtime/test_ds_config_dict.py @@ -9,6 +9,7 @@ import json import hjson import argparse +import torch from deepspeed.runtime.zero.config import DeepSpeedZeroConfig from deepspeed.accelerator import get_accelerator @@ -163,11 +164,15 @@ class TestConfigLoad(DistributedTest): world_size = 1 def test_dict(self, base_config): + dtype = torch.half + hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=base_config, model=model, model_parameters=model.parameters()) def test_json(self, base_config, tmpdir): + dtype = torch.half + config_path = os.path.join(tmpdir, "config.json") with open(config_path, 'w') as fp: json.dump(base_config, fp) @@ -176,6 +181,8 @@ def test_json(self, base_config, tmpdir): model, _, _, _ = deepspeed.initialize(config=config_path, model=model, model_parameters=model.parameters()) def test_hjson(self, base_config, tmpdir): + dtype = torch.half + config_path = os.path.join(tmpdir, "config.json") with open(config_path, 'w') as fp: hjson.dump(base_config, fp) @@ -188,6 +195,8 @@ class TestDeprecatedDeepScaleConfig(DistributedTest): world_size = 1 def test(self, base_config, tmpdir): + + dtype = torch.half config_path = create_config_from_dict(tmpdir, base_config) parser = argparse.ArgumentParser() args = parser.parse_args(args='') @@ -198,7 +207,11 @@ def test(self, base_config, tmpdir): model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(args=args, model=model, model_parameters=model.parameters()) - data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=5, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) @@ -210,13 +223,18 @@ class TestDistInit(DistributedTest): def test(self, base_config): hidden_dim = 10 + dtype = torch.half model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=base_config, model=model, model_parameters=model.parameters(), dist_init_required=True) - data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=5, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) @@ -229,11 +247,15 @@ class TestInitNoOptimizer(DistributedTest): def test(self, base_config): del base_config["optimizer"] hidden_dim = 10 - + dtype = torch.half model = 
SimpleModel(hidden_dim=hidden_dim) model, _, _, _ = deepspeed.initialize(config=base_config, model=model) - data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=5, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) with pytest.raises(AssertionError): @@ -246,16 +268,20 @@ class TestArgs(DistributedTest): world_size = 1 def test_none_args(self, base_config): + dtype = torch.half + model = SimpleModel(hidden_dim=10) model, _, _, _ = deepspeed.initialize(args=None, model=model, config=base_config) - data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=10, device=model.device) + data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=10, device=model.device, dtype=dtype) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) def test_no_args(self, base_config): + dtype = torch.half + model = SimpleModel(hidden_dim=10) model, _, _, _ = deepspeed.initialize(model=model, config=base_config) - data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=10, device=model.device) + data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=10, device=model.device, dtype=dtype) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) @@ -265,6 +291,7 @@ class TestNoModel(DistributedTest): def test(self, base_config): model = SimpleModel(hidden_dim=10) + with pytest.raises(AssertionError): model, _, _, _ = deepspeed.initialize(model=None, config=base_config) diff --git a/tests/unit/runtime/test_ds_initialize.py b/tests/unit/runtime/test_ds_initialize.py index 8ec9f05a0a17..fc689828831c 100644 --- a/tests/unit/runtime/test_ds_initialize.py +++ b/tests/unit/runtime/test_ds_initialize.py @@ -11,13 +11,15 @@ from unit.simple_model import SimpleModel, random_dataloader from unit.common import DistributedTest -from unit.util import bf16_required_version_check, required_amp_check +from unit.util import bf16_required_version_check, required_amp_check, hpu_lazy_enabled import deepspeed from deepspeed.ops.adam import FusedAdam from deepspeed.runtime.lr_schedules import WARMUP_LR, WarmupLR from deepspeed.runtime.config import ADAM_OPTIMIZER from deepspeed.runtime.utils import see_memory_usage, required_torch_version +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import FusedAdamBuilder @pytest.mark.parametrize('zero_stage', [0, 3]) @@ -43,6 +45,7 @@ def test(self, zero_stage): # 20B test #hidden_dim = 16 * 1024 hidden_dim = 4 + dtype = torch.half with deepspeed.zero.Init(enabled=zero_stage == 3, config_dict_or_path=ds_config): model = SimpleModel(hidden_dim, nlayers=78) @@ -53,7 +56,7 @@ def test(self, zero_stage): total_samples=50, hidden_dim=hidden_dim, device=model.device, - dtype=torch.half) + dtype=dtype) for batch in data_loader: model(batch[0], batch[1]) see_memory_usage('post-fwds', force=True) @@ -68,6 +71,9 @@ def test(self, optimizer_type): def _optimizer_callable(params) -> Optimizer: return AdamW(params=params) + if (optimizer_type is None) and (not deepspeed.ops.__compatible_ops__[FusedAdamBuilder.NAME]): + pytest.skip("FusedAdam is not compatible") + hidden_dim = 10 model = SimpleModel(hidden_dim) @@ -79,13 +85,13 @@ def _optimizer_callable(params) -> Optimizer: client_optimizer = Adam(model.parameters()) else: client_optimizer = _optimizer_callable - _, ds_optimizer, _, 
_ = deepspeed.initialize(config=config_dict, model=model, model_parameters=list(model.parameters()), optimizer=client_optimizer) if client_optimizer is None: - assert isinstance(ds_optimizer, FusedAdam) + optim = FusedAdam + assert isinstance(ds_optimizer, optim) elif isinstance(client_optimizer, Optimizer): assert ds_optimizer == client_optimizer else: @@ -96,8 +102,11 @@ def _optimizer_callable(params) -> Optimizer: class TestConfigOptimizer(DistributedTest): world_size = 1 + @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedAdamBuilder.NAME], + reason="FusedAdam is not compatible") def test(self, client_parameters): ds_config = {"train_batch_size": 1, "optimizer": {"type": "Adam", "params": {"lr": 0.001}}} + optimizer = FusedAdam hidden_dim = 10 model = SimpleModel(hidden_dim) @@ -108,8 +117,7 @@ def test(self, client_parameters): model_parameters = None _, ds_optimizer, _, _ = deepspeed.initialize(config=ds_config, model=model, model_parameters=model_parameters) - - assert isinstance(ds_optimizer, FusedAdam) + assert isinstance(ds_optimizer, optimizer) @pytest.mark.parametrize('optimizer_extension', ['zero1', 'zero2', 'zero3', 'amp', None]) @@ -136,6 +144,8 @@ def test(self, optimizer_extension, model_dtype, grad_accum_dtype): pytest.skip( "DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly" ) + if fp16 and torch.float16 not in get_accelerator().supported_dtypes(): + pytest.skip(f"FP16 not supported by {get_accelerator().device_name()}") if amp and not required_amp_check(): pytest.skip("Amp is not installed can't run amp check") # Config declaration @@ -222,6 +232,9 @@ def test(self, optimizer_extension, model_dtype, grad_accum_dtype): hidden_dim = 10 model = SimpleModel(hidden_dim) + # TODO: SW-145674 remove this WA when SW-145671 is resolved. 
+ if hpu_lazy_enabled(): + model.to(get_accelerator().device_name()) model_parameters = list(model.parameters()) if key in is_supported: diff --git a/tests/unit/runtime/test_multi_output_model.py b/tests/unit/runtime/test_multi_output_model.py index d9aba419b158..6c3263f7729c 100644 --- a/tests/unit/runtime/test_multi_output_model.py +++ b/tests/unit/runtime/test_multi_output_model.py @@ -5,9 +5,14 @@ import torch import deepspeed +import pytest from pytest import approx from unit.common import DistributedTest from unit.multi_output_model import MultiOutputModel, multi_output_dataloader +from deepspeed.accelerator import get_accelerator + +if torch.half not in get_accelerator().supported_dtypes(): + pytest.skip(f"fp16 not supported, valid dtype: {get_accelerator().supported_dtypes()}", allow_module_level=True) class TestTwoOutputModel(DistributedTest): @@ -35,7 +40,6 @@ def test(self, tmpdir): hidden_dim = 10 weight_value = 0.1 - model = MultiOutputModel(hidden_dim, weight_value) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) total_samples = 4 diff --git a/tests/unit/runtime/test_pld.py b/tests/unit/runtime/test_pld.py index 1f602db73b2f..cfe4f3e38b10 100644 --- a/tests/unit/runtime/test_pld.py +++ b/tests/unit/runtime/test_pld.py @@ -5,9 +5,9 @@ import numpy as np import deepspeed +import torch import pytest from deepspeed.runtime.progressive_layer_drop import ProgressiveLayerDrop - from unit.common import DistributedTest from unit.simple_model import SimpleModel, PLD_SimpleModel, random_dataloader @@ -49,11 +49,15 @@ def test_pld_model(self, theta): } } hidden_dim = 10 - + dtype = torch.half model = PLD_SimpleModel(hidden_dim, empty_grad=False) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) - data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=50, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for i, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) @@ -90,11 +94,16 @@ def test_non_pld_model(self): } } hidden_dim = 10 + dtype = torch.half model = SimpleModel(hidden_dim, empty_grad=False) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) - data_loader = random_dataloader(model=model, total_samples=1, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=1, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for i, batch in enumerate(data_loader): with pytest.raises(TypeError): diff --git a/tests/unit/runtime/zero/test_ignore_unused_parameters.py b/tests/unit/runtime/zero/test_ignore_unused_parameters.py index aade488fde42..baee69a4e3fa 100644 --- a/tests/unit/runtime/zero/test_ignore_unused_parameters.py +++ b/tests/unit/runtime/zero/test_ignore_unused_parameters.py @@ -4,10 +4,10 @@ # DeepSpeed Team import pytest +import torch from unit.common import DistributedTest from unit.simple_model import UnusedParametersModel, random_dataloader from deepspeed.ops.op_builder import CPUAdamBuilder - import deepspeed @@ -42,11 +42,16 @@ def test(self, ignore_unused_parameters): } } hidden_dim = 4 + dtype = torch.half model = UnusedParametersModel(hidden_dim=hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) - data_loader = 
random_dataloader(model=model, total_samples=10, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=10, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) def _loop(): for n, batch in enumerate(data_loader): diff --git a/tests/unit/runtime/zero/test_zero.py b/tests/unit/runtime/zero/test_zero.py index 5a8af95bb0f8..ac07334f52d8 100644 --- a/tests/unit/runtime/zero/test_zero.py +++ b/tests/unit/runtime/zero/test_zero.py @@ -25,6 +25,7 @@ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint from deepspeed.runtime.zero.utils import ZeRORuntimeException from deepspeed.accelerator import get_accelerator +from unit.util import hpu_lazy_enabled def run_unbalanced_gradients(model, data_loader): @@ -77,10 +78,14 @@ def test(self, zero_stage): }, } hidden_dim = 4 - + dtype = torch.half model = SimpleModel(hidden_dim=hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) - data_loader = random_dataloader(model=model, total_samples=16, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=16, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) run_unbalanced_gradients(model, data_loader) @@ -117,6 +122,7 @@ def test(self, mics_enabled, zero_stage=3): }, } hidden_dim = 4 + dtype = torch.half class AlbertLikeModel(torch.nn.Module): @@ -134,7 +140,11 @@ def forward(self, x, y): model = AlbertLikeModel(hidden_dim=hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) - data_loader = random_dataloader(model=model, total_samples=16, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=16, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for i, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) @@ -195,6 +205,7 @@ def forward(self, x, y): return self.cross_entropy_loss(hidden, y) hidden_dim = 3 # do not change + dtype = torch.half world_size = dist.get_world_size() # we want at least 2x layers as there are gpus to trigger round_robin_fp16_groups reshuffle in zero2 @@ -202,10 +213,15 @@ def forward(self, x, y): model = MyModel(hidden_dim=hidden_dim, n_layers=n_layers, freeze_params=freeze_params) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) + # Flush zero stage 3 cache model.empty_partition_cache() - data_loader = random_dataloader(model=model, total_samples=16, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=16, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for i, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) @@ -283,10 +299,13 @@ def forward(self, x, y): return self.cross_entropy_loss(hidden, y) hidden_dim = 3 + dtype = torch.half world_size = dist.get_world_size() n_layers = world_size * 2 model = MyModel(hidden_dim=hidden_dim, n_layers=n_layers, freeze_params=freeze_params) + if hpu_lazy_enabled(): + model.to(get_accelerator().device_name()) optim_groups = [ { @@ -308,7 +327,11 @@ def forward(self, x, y): ) model.empty_partition_cache() - data_loader = random_dataloader(model=model, total_samples=16, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=16, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for i, batch in 
enumerate(data_loader): loss = model(batch[0], batch[1]) @@ -372,6 +395,7 @@ def test(self, allgather_bucket_size, zero_stage=2): }, } hidden_dim = 4 + dtype = torch.half model = SimpleModel(hidden_dim=hidden_dim) if allgather_bucket_size % 2 == 0: @@ -407,6 +431,7 @@ def test(self, zero_stage=2): }, } hidden_dim = 4 + dtype = torch.half model = SimpleModel(hidden_dim=hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) @@ -423,6 +448,7 @@ def test(self, zero_stage=2): def _ds_initialize_for_param_partitioning_testing(model: Module, cfg: dict) -> DeepSpeedEngine: + ds_engine, _, _, _ = deepspeed.initialize(config=cfg, model=model, model_parameters=model.parameters()) return ds_engine @@ -864,12 +890,15 @@ def forward(self, x: Tensor) -> Tensor: "loss_scale": 1.0, }, } - with deepspeed.zero.Init(mem_efficient_linear=False, enabled=init_context_manager): + dtype = torch.float16 + zero3_init_dtype = None + + with deepspeed.zero.Init(dtype=zero3_init_dtype, mem_efficient_linear=False, enabled=init_context_manager): model = LargeParamModel() ds_engine = _ds_initialize_for_param_partitioning_testing(model, ds_config) for train_iter in range(3): # test multiple iterations to cover prefetching - activation: Tensor = ds_engine(torch.ones(param_sz, dtype=torch.float16, device=ds_engine.device)) + activation: Tensor = ds_engine(torch.ones(param_sz, dtype=dtype, device=ds_engine.device)) partition_sz = math.ceil(param_sz / self.world_size) for rank_idx, start_idx in enumerate(range(0, param_sz, partition_sz)): @@ -900,7 +929,6 @@ class ManyParamModel(Module): def __init__(self) -> None: super().__init__() - self.modulelist = ModuleList( EltwiseMultiplicationModule(weight=Parameter(torch.empty((param_sz, ), dtype=torch.float32))) for _ in range(n_layers)) @@ -943,6 +971,7 @@ def forward(self, x: Tensor) -> Tensor: "loss_scale": 1.0, }, } + dtype = torch.half with deepspeed.zero.Init(config=ds_cfg, mem_efficient_linear=False, enabled=init_context_manager): model = ManyParamModel() @@ -950,12 +979,11 @@ def forward(self, x: Tensor) -> Tensor: ds_engine = _ds_initialize_for_param_partitioning_testing(model, ds_cfg) for _ in range(3): # test multiple iterations to cover prefetching - activations: List[Tensor] = ds_engine( - torch.ones((param_sz, ), dtype=torch.float16, device=ds_engine.device)) + activations: List[Tensor] = ds_engine(torch.ones((param_sz, ), dtype=dtype, device=ds_engine.device)) assert len(activations) == n_layers partition_sz = math.ceil(param_sz / self.world_size) - expected_activations = torch.empty(param_sz, dtype=torch.float16, device=ds_engine.device) + expected_activations = torch.empty(param_sz, dtype=dtype, device=ds_engine.device) for start_idx in range(0, param_sz, partition_sz): expected_activations[start_idx:start_idx + partition_sz] = dist.get_rank() @@ -983,8 +1011,8 @@ class ModelWhereParentInitializesChildWeights(Module): def __init__(self) -> None: super().__init__() - - self.linear = Linear(12, 1) + dev = get_accelerator().device_name() + self.linear = Linear(12, 1, device=dev) self.apply(self.__init_weights) @@ -1295,10 +1323,15 @@ def test(self): }, } hidden_dim = 10 + dtype = torch.half model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict) - data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=50, + 
hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) dist.barrier() for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) @@ -1328,6 +1361,8 @@ def test(self, return_type): }, } hidden_dim = 10 + dtype = torch.half + zero3_init_dtype = None class MyModel(torch.nn.Module): @@ -1349,11 +1384,15 @@ def forward(self, x, y): raise NotImplementedError return val - with deepspeed.zero.Init(): + with deepspeed.zero.Init(dtype=zero3_init_dtype): model = MyModel(hidden_dim) model, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict) - data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=50, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) dist.barrier() for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) @@ -1397,12 +1436,17 @@ def test(self, zero_stage): }, } hidden_dim = 4 + dtype = torch.half model = SimpleModel(hidden_dim=hidden_dim, nlayers=12) model, optimizer, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) - data_loader = random_dataloader(model=model, total_samples=16, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=16, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) assert model.global_steps == 0 @@ -1475,11 +1519,16 @@ def forward(self, x, y): val = (x, loss) return val - with deepspeed.zero.Init(config_dict_or_path=config_dict, enabled=zero_stage == 3): - model = MyModel(hidden_dim) + dtype = torch.float16 + with deepspeed.zero.Init(dtype=dtype, config_dict_or_path=config_dict, enabled=zero_stage == 3): + model = MyModel(hidden_dim) model, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict) - data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=50, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) dist.barrier() for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) @@ -1509,8 +1558,10 @@ def test(self, force_ds_optim): "zero_force_ds_cpu_optimizer": force_ds_optim, } hidden_dim = 10 - model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().current_device_name() + model.to(device) optimizer = torch.optim.Adam(model.parameters()) @@ -1538,6 +1589,7 @@ def test_training_partition_cache(self, training): "stage3_param_persistence_threshold": hidden_dim, }, } + dtype = torch.half if training: config_dict["optimizer"] = {"type": "Adam"} @@ -1546,7 +1598,6 @@ def test_training_partition_cache(self, training): model, _, _, _ = deepspeed.initialize(model=model, config=config_dict) - dtype = torch.half data_loader = random_dataloader( model=model, total_samples=6, @@ -1602,7 +1653,6 @@ def test_empty_param_groups(self, dtype, use_client_optimizer, empty_weight_grou "enabled": dtype == torch.bfloat16 } } - if use_client_optimizer: optimizer = torch.optim.AdamW(param_groups, lr=0.1) model_parameters = model.parameters() diff --git a/tests/unit/runtime/zero/test_zero_context.py b/tests/unit/runtime/zero/test_zero_context.py index 0ddf1026eaf8..eaf98966ed72 100644 --- a/tests/unit/runtime/zero/test_zero_context.py +++ b/tests/unit/runtime/zero/test_zero_context.py @@ -71,7 +71,8 @@ def __init__(self, hidden_dim): super(MyModel, 
self).__init__() self.l1 = torch.nn.Linear(hidden_dim, hidden_dim) - with deepspeed.zero.Init(config_dict_or_path=config_dict): + dtype = None + with deepspeed.zero.Init(config_dict_or_path=config_dict, dtype=dtype): model = MyModel(hidden_dim) with deepspeed.zero.GatheredParameters(list(model.parameters())): @@ -111,7 +112,8 @@ class TestSerialContext(DistributedTest): def test_subclass_param(self): setup_serial_env() - with deepspeed.zero.Init(config=config): + dtype = None + with deepspeed.zero.Init(config=config, dtype=dtype): model = ConvNet() assert model.param.ds_status == ZeroParamStatus.NOT_AVAILABLE @@ -238,7 +240,7 @@ def forward(self, input): return C.sum() net = ExtLinear() - + dtype = torch.float16 args = SimpleNamespace(local_rank=0) engine, optim, _, _ = deepspeed.initialize(args=args, model=net, @@ -248,7 +250,7 @@ def forward(self, input): with deepspeed.zero.GatheredParameters(net.linear1.weight): assert net.linear1.weight.numel() == net.dim**2 - input = torch.rand(net.dim).to(engine.device).half() + input = torch.rand(net.dim).to(engine.device).to(dtype) loss = engine(input) engine.backward(loss) engine.step() @@ -258,7 +260,8 @@ class TestScatterGather(DistributedTest): world_size = 2 def test(self): - with deepspeed.zero.Init(): + dtype = None + with deepspeed.zero.Init(dtype=dtype): l = torch.nn.Linear(6, 3) assert l.weight.ds_status == ZeroParamStatus.NOT_AVAILABLE assert l.weight.shape == torch.Size(partitioned_param_data_shape) @@ -277,7 +280,8 @@ class TestGatherUpdate(DistributedTest): world_size = 2 def test(self): - with deepspeed.zero.Init(): + dtype = torch.float16 + with deepspeed.zero.Init(dtype=dtype): l = torch.nn.Linear(4, 2) assert l.weight.ds_status == ZeroParamStatus.NOT_AVAILABLE diff --git a/tests/unit/runtime/zero/test_zero_context_ancestry.py b/tests/unit/runtime/zero/test_zero_context_ancestry.py index 21955f5df152..1a631bb3b805 100644 --- a/tests/unit/runtime/zero/test_zero_context_ancestry.py +++ b/tests/unit/runtime/zero/test_zero_context_ancestry.py @@ -67,7 +67,8 @@ class TestSerialParamInit(DistributedTest): def test_subclass_param_init(self): setup_serial_env() - with deepspeed.zero.Init(config=config): + dtype = None + with deepspeed.zero.Init(config=config, dtype=dtype): model = Son().cpu() # test that all params have been partitioned @@ -107,7 +108,8 @@ def __init__(self): def magic(self): return 42 - with deepspeed.zero.Init(): + dtype = torch.float16 + with deepspeed.zero.Init(dtype=dtype): model = Model() engine, *_ = deepspeed.initialize(model=model, config=ds_config, model_parameters=model.parameters()) assert engine.magic() == 42 diff --git a/tests/unit/runtime/zero/test_zero_context_return.py b/tests/unit/runtime/zero/test_zero_context_return.py index 874a8ea3b676..884dc3095e04 100644 --- a/tests/unit/runtime/zero/test_zero_context_return.py +++ b/tests/unit/runtime/zero/test_zero_context_return.py @@ -137,12 +137,12 @@ def test_ext_param_return(self): setup_serial_env() net = DanglingExt() - + dtype = torch.float16 args = SimpleNamespace(local_rank=0) engine, _, _, _ = deepspeed.initialize(args=args, model=net, model_parameters=net.parameters(), config=config) for _ in range(5): - input = torch.rand(net.dim).to(engine.device).half() + input = torch.rand(net.dim).to(engine.device).to(dtype) loss = engine(input) engine.backward(loss) engine.step() @@ -151,14 +151,14 @@ def test_ext_param_return(self): def test_ext_param_returnobj(self): setup_serial_env() print() - + dtype = torch.float16 net = ModelContainer(return_obj=True) 
args = SimpleNamespace(local_rank=0) engine, _, _, _ = deepspeed.initialize(args=args, model=net, model_parameters=net.parameters(), config=config) for _ in range(5): - input = torch.rand(net.dim).to(engine.device).half() + input = torch.rand(net.dim).to(engine.device).to(dtype) loss = engine(input) assert len(net._external_params) == 1 assert len(net.dangler._external_params) == 0 @@ -169,14 +169,14 @@ def test_ext_param_returnobj(self): def test_stage_3_output_type(self, output_type): setup_serial_env() print() - + dtype = torch.float16 net = ModelContainerVariableOutputType(output_type=output_type) args = SimpleNamespace(local_rank=0) engine, _, _, _ = deepspeed.initialize(args=args, model=net, model_parameters=net.parameters(), config=config) for _ in range(1): - input = torch.rand(net.dim).to(engine.device).half() + input = torch.rand(net.dim).to(engine.device).to(dtype) loss = engine(input) if loss is not None: if isinstance(loss, dict): diff --git a/tests/unit/runtime/zero/test_zero_dynamic_class.py b/tests/unit/runtime/zero/test_zero_dynamic_class.py index e235206d4dc4..b2cb66dfc79c 100644 --- a/tests/unit/runtime/zero/test_zero_dynamic_class.py +++ b/tests/unit/runtime/zero/test_zero_dynamic_class.py @@ -6,7 +6,6 @@ import torch from unit.common import DistributedTest - import deepspeed @@ -15,7 +14,6 @@ class TestNewClassDeclaredNestingInit(DistributedTest): def test_new_class_declared_nesting_init(self): ds_config = dict(train_batch_size=1, zero_optimization=dict(stage=3)) - with deepspeed.zero.Init(config_dict_or_path=ds_config): class MyModel(torch.nn.Module): diff --git a/tests/unit/runtime/zero/test_zero_nesting_init.py b/tests/unit/runtime/zero/test_zero_nesting_init.py index 15d82fd8be00..fc40de3da229 100644 --- a/tests/unit/runtime/zero/test_zero_nesting_init.py +++ b/tests/unit/runtime/zero/test_zero_nesting_init.py @@ -18,7 +18,6 @@ class TestNestingInit(DistributedTest): def test_nesting_init(self): ds_config = dict(train_batch_size=1, zero_optimization=dict(stage=3)) - with deepspeed.zero.Init(config_dict_or_path=ds_config): with deepspeed.zero.Init(config_dict_or_path=ds_config): model = torch.nn.Linear(4, 4) diff --git a/tests/unit/runtime/zero/test_zero_offloadpp.py b/tests/unit/runtime/zero/test_zero_offloadpp.py index 5bfec399e19f..a28f2385a0f2 100644 --- a/tests/unit/runtime/zero/test_zero_offloadpp.py +++ b/tests/unit/runtime/zero/test_zero_offloadpp.py @@ -6,11 +6,8 @@ import deepspeed.comm as dist from unit.common import DistributedTest from unit.simple_model import random_dataloader - import deepspeed - from deepspeed.runtime.zero.offload_config import DeepSpeedZeroOffloadOptimizerConfig - import torch.nn as nn diff --git a/tests/unit/runtime/zero/test_zero_tensor_fragment.py b/tests/unit/runtime/zero/test_zero_tensor_fragment.py index b3adfdf96c50..ea118bba2801 100644 --- a/tests/unit/runtime/zero/test_zero_tensor_fragment.py +++ b/tests/unit/runtime/zero/test_zero_tensor_fragment.py @@ -120,6 +120,7 @@ def test_zero_fragments(self, tmpdir, api_type, zero_stage, offload_device, froz "stage": zero_stage, } } + dtype = torch.float16 if offload_device == OffloadDeviceEnum.cpu: config_dict["zero_optimization"]["offload_optimizer"] = {"device": offload_device} @@ -131,7 +132,7 @@ def test_zero_fragments(self, tmpdir, api_type, zero_stage, offload_device, froz hidden_dim = 128 if zero_stage == 3: - with deepspeed.zero.Init(config_dict_or_path=config_dict): + with deepspeed.zero.Init(config_dict_or_path=config_dict, dtype=dtype): model = MyModel(hidden_dim, 
frozen_weights) else: model = MyModel(hidden_dim, frozen_weights) @@ -305,7 +306,6 @@ def test_zero_fragments(self, tmpdir, api_type, zero_stage, offload_device, dtyp config_dict["fp16"] = {"enabled": True, "initial_scale_power": 8} elif dtype == torch.bfloat16: config_dict["bf16"] = {"enabled": True} - hidden_dim = 128 if zero_stage == 3: config_dict["zero_optimization"]["param_persistence_threshold"] = hidden_dim diff --git a/tests/unit/runtime/zero/test_zeropp.py b/tests/unit/runtime/zero/test_zeropp.py index 7a05c2a8001b..27ec7269afc6 100644 --- a/tests/unit/runtime/zero/test_zeropp.py +++ b/tests/unit/runtime/zero/test_zeropp.py @@ -14,12 +14,6 @@ from deepspeed.runtime.zero.config import DeepSpeedZeroConfig import torch.nn as nn -import torch - -from transformers import AutoModelForCausalLM, AutoTokenizer -from torch.utils.data import DataLoader - -import numpy as np class NNModel(nn.Module): @@ -46,16 +40,9 @@ def _assert_no_secondary_tensor_group(model: Module) -> None: assert param.ds_zero_param_process_group is None -def _check_secondary_tensor_existence(model: Module) -> None: - for _, param in model.named_parameters(): - if param.ds_secondary_tensor is not None: - return True - return False - - def _assert_secondary_tensor_size(model: Module) -> None: - for name, param in model.named_parameters(): - assert param.ds_secondary_tensor is not None, f"param {param.ds_id}:{name} does not have secondary tensor" + for _, param in model.named_parameters(): + assert param.ds_secondary_tensor is not None assert param.ds_secondary_tensor.size()[0] % param.ds_tensor.size()[0] == 0 @@ -63,7 +50,7 @@ def _assert_secondary_tensor_size(model: Module) -> None: #Assert when zpg=1 that secondary group and tensors are invalid @pytest.mark.sequential @pytest.mark.parametrize("h_dim", [1024]) -@pytest.mark.parametrize("n_layers", [9]) +@pytest.mark.parametrize("n_layers", [4, 9]) @pytest.mark.parametrize("zpg", [1, 2, 4]) class TestZeroPPConfigSweep(DistributedTest): world_size = 4 @@ -105,172 +92,3 @@ def test(self, h_dim: int, n_layers: int, zpg: int) -> None: loss = model(batch[0], batch[1]) model.backward(loss) model.step() - - def test_eval(self, h_dim: int, n_layers: int, zpg: int) -> None: - # in this test case, we are testing that hpz should be enabled when eval mode is on - config_dict = { - "train_micro_batch_size_per_gpu": 1, - "zero_optimization": { - "stage": 3, - "stage3_max_reuse_distance": 0, - "zero_hpz_partition_size": zpg, - "contiguous_gradients": True, - "overlap_comm": True, - }, - "optimizer": { - "type": "Adam", - "params": { - "lr": 1. 
-                }
-            },
-            "fp16": {
-                "enabled": True,
-                "loss_scale": 1.,
-            }
-        }
-
-        model = NNModel(h_dim, n_layers)
-        model, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict)
-        data_loader = random_dataloader(model=model, total_samples=20, hidden_dim=h_dim, device=model.device)
-        dist.barrier()
-        if zpg == 1:
-            _assert_no_secondary_tensor_group(model)
-
-        for n, batch in enumerate(data_loader):
-            if zpg != 1:
-                # here we check that the hpz is enabled when the previous iteration does not update the model
-                _assert_secondary_tensor_size(model)
-            with torch.no_grad():
-                loss = model(batch[0], batch[1])
-
-    def test_gradient_accumulation(self, h_dim: int, n_layers: int, zpg: int) -> None:
-        # in this test case, we are testing that hpz should be enabled for the intermediate gradient accumulation steps
-        # In this test, we should disable loss_scale
-        config_dict = {
-            "train_micro_batch_size_per_gpu": 1,
-            "gradient_accumulation_steps": 3,
-            "zero_optimization": {
-                "stage": 3,
-                "stage3_max_reuse_distance": 0,
-                "zero_hpz_partition_size": zpg,
-                "contiguous_gradients": True,
-                "overlap_comm": True,
-            },
-            "optimizer": {
-                "type": "Adam",
-                "params": {
-                    "lr": 1.
-                }
-            },
-            "fp16": {
-                "enabled": True,
-                "loss_scale": 0.,
-            }
-        }
-
-        model = NNModel(h_dim, n_layers)
-        model, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict)
-        data_loader = random_dataloader(model=model, total_samples=20, hidden_dim=h_dim, device=model.device)
-        dist.barrier()
-        if zpg == 1:
-            _assert_no_secondary_tensor_group(model)
-
-        for n, batch in enumerate(data_loader):
-            if n == 0 and zpg != 1:
-                _assert_secondary_tensor_size(model)
-            # here we cannot assert that secondary tensor does not exist because the gradient is likely overflowed as we use random data
-            if n > 0 and n % 3 != 0 and zpg != 1:
-                # if the previous iteration does not update the model, then the hpz should be enabled
-                assert _check_secondary_tensor_existence(model), f"n={n}"
-            loss = model(batch[0], batch[1])
-            model.backward(loss)
-            model.step()
-
-
-@pytest.mark.nightly
-@pytest.mark.parametrize("model_name", ["gpt2"])
-class TestZeroPPConvergence(DistributedTest):
-    world_size = 4
-
-    def load_and_prepare_data(self, model_name):
-        """Load model, tokenizer and dataset, and prepare data loader."""
-        from datasets import load_dataset
-
-        # Load model and tokenizer
-        model = AutoModelForCausalLM.from_pretrained(model_name)
-        tokenizer = AutoTokenizer.from_pretrained(model_name)
-        tokenizer.pad_token = tokenizer.eos_token
-
-        # Load and tokenize dataset
-        dataset = load_dataset("wikitext", 'wikitext-103-raw-v1', split='train[:1%]').filter(lambda x: x["text"])
-
-        def tokenize_function(examples):
-            # Tokenize and ensure 'labels' are the same as 'input_ids'
-            tokenized_output = tokenizer(examples["text"], padding="max_length", truncation=True, return_tensors='pt')
-            tokenized_output["labels"] = tokenized_output["input_ids"].clone()
-            return tokenized_output
-
-        tokenized_dataset = dataset.map(tokenize_function, batched=True)
-        tokenized_dataset.set_format('torch', columns=['input_ids', 'attention_mask', 'labels'])
-
-        # Create data loader
-        data_loader = DataLoader(tokenized_dataset, batch_size=1, shuffle=False)
-        return model, data_loader
-
-    def get_loss(self, model, data_loader, config_dict, step=500):
-        """Train the model and calculate average loss."""
-        # Initialize DeepSpeed
-        model, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict)
-        dist.barrier()
-        model.train()
-
-        # Training loop
-        losses = []
-        for n, batch in enumerate(data_loader):
-            if n >= step:
-                break
-            batch = {k: v.to(model.device) for k, v in batch.items()}
-            outputs = model(**batch)
-            loss = outputs.loss
-            model.backward(loss)
-            model.step()
-            losses.append(loss.item())
-
-        return np.nanmean(losses[-100:])
-
-    def get_config_dict(self, use_quantized_weights=False, use_hpz=False):
-        """Generate the configuration dictionary for DeepSpeed."""
-        config = {
-            "train_micro_batch_size_per_gpu": 1,
-            "zero_optimization": {
-                "stage": 3,
-                "stage3_max_reuse_distance": 0,
-                "contiguous_gradients": True,
-                "overlap_comm": True,
-            },
-            "optimizer": {
-                "type": "Adam",
-                "params": {
-                    "lr": 1e-5
-                }
-            },
-            "fp16": {
-                "enabled": True
-            }
-        }
-        if use_quantized_weights:
-            config["zero_optimization"]["zero_quantized_weights"] = True
-        if use_hpz:
-            config["zero_optimization"]["zero_hpz_partition_size"] = self.world_size // 2
-        return config
-
-    def test(self, model_name):
-        torch.manual_seed(0)
-        model, data_loader = self.load_and_prepare_data(model_name)
-        zeropp_loss = self.get_loss(model, data_loader, self.get_config_dict(use_quantized_weights=True, use_hpz=True))
-        model, data_loader = self.load_and_prepare_data(model_name)
-        baseline_loss = self.get_loss(model, data_loader, self.get_config_dict())
-
-        # Output and assert
-        print(f"zeropp_loss={zeropp_loss}, baseline_loss={baseline_loss}")
-        assert zeropp_loss < baseline_loss * 1.1, f"zeropp_loss={zeropp_loss}, baseline_loss={baseline_loss}"
diff --git a/tests/unit/skip_marker.py b/tests/unit/skip_marker.py
new file mode 100644
index 000000000000..956f90f9f0dc
--- /dev/null
+++ b/tests/unit/skip_marker.py
@@ -0,0 +1,98 @@
+# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+hpu_lazy_skip_tests = {}
+
+g1_lazy_skip_tests = {
+    "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[FusedAdam]":
+    "Skipping test due to segfault. SW-170285",
+    "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[8-fp32]":
+    "FusedAdam test not supported. Test got stuck.",
+    "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp32]":
+    "FusedAdam test not supported. Test got stuck.",
+    "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp32]":
+    "FusedAdam test not supported. Test got stuck.",
+    "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp32]":
+    "FusedAdam test not supported. Test got stuck.",
+    "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp32]":
+    "FusedAdam test not supported. Test got stuck.",
+    "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp32]":
+    "FusedAdam test not supported. Test got stuck.",
+    "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp32]":
+    "FusedAdam test not supported.
Test got stuck.", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config2]": + "Stuck on eager mode", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config1]": + "Stuck on eager mode", + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "HPU is not supported on deepspeed-mii", +} + +g2_lazy_skip_tests = { + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[FusedAdam]": + "Skipping test due to segfault. SW-175725.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp16]": + "Skipping test due to segfault then stuck. SW-175725.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp16]": + "Skipping test due to segfault then stuck. SW-175725.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp16]": + "Skipping test due to segfault then stuck. SW-175725.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp16]": + "FusedAdam test not supported. Test got stuck. SW-175725.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[8-fp32]": + "FusedAdam test not supported. Test got stuck. SW-175725.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp32]": + "FusedAdam test not supported. Test got stuck. SW-175725.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[8-fp16]": + "FusedAdam test not supported. Test got stuck. SW-175725.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp32]": + "FusedAdam test not supported. Test got stuck. SW-175725.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp32]": + "FusedAdam test not supported. Test got stuck. SW-175725.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp32]": + "FusedAdam test not supported. Test got stuck. SW-175725.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp16]": + "FusedAdam test not supported. Test got stuck. SW-175725.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp16]": + "FusedAdam test not supported. Test got stuck. SW-175725.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp32]": + "FusedAdam test not supported. Test got stuck. SW-175725.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp32]": + "FusedAdam test not supported. Test got stuck. SW-175725.", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-3]": + "synapse detected critical error. SW-176889", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-2]": + "synapse detected critical error. SW-176889", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-1]": + "synapse detected critical error. SW-176889", + "unit/runtime/half_precision/test_fp16.py::TestFP16OptimizerForMoE::test_fused_gradnorm": + "FusedAdam test not supported. Test got stuck. 
SW-175725.", + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "HPU is not supported on deepspeed-mii", +} + +hpu_eager_skip_tests = {} + +g1_eager_skip_tests = { + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "HPU is not supported on deepspeed-mii", +} + +g2_eager_skip_tests = { + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-2-2]": + "Skip, due to SW-182681", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-1-2]": + "Skip, due to SW-182681", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-1-2]": + "Skip, due to SW-182681", + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "HPU is not supported on deepspeed-mii", +} + +gpu_skip_tests = { + "unit/runtime/zero/test_zero.py::TestZeroOffloadOptim::test[True]": + "Disabled as it is causing test to stuck. SW-163517.", +} diff --git a/tests/unit/util.py b/tests/unit/util.py index 75c3000bd4a2..9d00da36ea0e 100644 --- a/tests/unit/util.py +++ b/tests/unit/util.py @@ -5,6 +5,8 @@ import pytest import torch +import os +import multiprocessing from deepspeed.accelerator import get_accelerator, is_current_accelerator_supported from deepspeed.git_version_info import torch_info from packaging import version as pkg_version @@ -47,11 +49,14 @@ def bf16_required_version_check(accelerator_check=True): cuda_version_available = CUDA_MAJOR >= 11 nccl_version_available = NCCL_MAJOR > 2 or (NCCL_MAJOR == 2 and NCCL_MINOR >= 10) npu_available = get_accelerator().device_name() == 'npu' + hpu_available = get_accelerator().device_name() == 'hpu' if torch_version_available and cuda_version_available and nccl_version_available and accelerator_pass: return True elif npu_available: return True + elif hpu_available: + return True else: return False @@ -76,3 +81,57 @@ def required_amp_check(): return False else: return True + + +def worker(proc_id, return_dict): + #TODO SW-114787: move to new api outside experimental + import habana_frameworks.torch.utils.experimental as htexp + deviceType = htexp._get_device_type() + if deviceType == htexp.synDeviceType.synDeviceGaudi: + return_dict['devicetype'] = "Gaudi" + elif deviceType == htexp.synDeviceType.synDeviceGaudi2: + return_dict['devicetype'] = "Gaudi2" + elif deviceType == htexp.synDeviceType.synDeviceGaudi3: + return_dict['devicetype'] = "Gaudi3" + else: + return_dict['devicetype'] = None + assert False, f'Unexpected hpu device Type: {deviceType}' + + +def get_hpu_dev_version(): + hpu_dev = None + if get_accelerator().device_name() != 'hpu': + return hpu_dev + if os.getenv("DEEPSPEED_UT_HL_DEVICE", default=None): + hpu_dev = os.getenv("DEEPSPEED_UT_HL_DEVICE") + if hpu_dev not in ["Gaudi", "Gaudi2", "Gaudi3"]: + manager = multiprocessing.Manager() + return_dict = manager.dict() + proc_id = 0 + multiprocessing.set_start_method("spawn", force=True) + p = multiprocessing.Process(target=worker, args=(proc_id, return_dict)) + p.start() + p.join() + try: + dev_type = return_dict['devicetype'] + except: + assert False, 'Unexpected hpu device Type: {}'.format(return_dict['devicetype']) + p.terminate() + exit_code = p.exitcode + if exit_code: + assert False, 'HPU dev type process exit with: {}'.format(exit_code) + if dev_type in ["Gaudi", "Gaudi2", "Gaudi3"]: + hpu_dev = dev_type + os.environ['DEEPSPEED_UT_HL_DEVICE'] = dev_type + return dev_type + else: + assert False, 'Unexpected hpu device Type: {}'.format(return_dict['devicetype']) + else: + 
return hpu_dev + + +def hpu_lazy_enabled(): + if get_accelerator().device_name() == 'hpu': + import habana_frameworks.torch.hpu as thpu + return thpu.is_lazy() + return False diff --git a/tests/unit/utils/test_groups.py b/tests/unit/utils/test_groups.py index d8f12be4f3c6..5cd35baf3510 100644 --- a/tests/unit/utils/test_groups.py +++ b/tests/unit/utils/test_groups.py @@ -18,7 +18,7 @@ def test_get_expert_parallel_ranks(): expert_data_parallel_group = [0,8],[2,10],[4,12],[6,14], [1,9],[3,11],[5,13],[7,15] """ expert_parallel_groups, expert_data_parallel_groups = _get_expert_parallel_ranks(world_size=16, - model_parallel_size_=2, + tensor_parallel_size_=2, expert_parallel_size_=4) assert expert_parallel_groups == [ [0, 2, 4, 6], diff --git a/tests/unit/utils/test_init_on_device.py b/tests/unit/utils/test_init_on_device.py index 5d84e9be855a..d9ff7eee1d7b 100644 --- a/tests/unit/utils/test_init_on_device.py +++ b/tests/unit/utils/test_init_on_device.py @@ -20,9 +20,10 @@ def test_on_device(self, device): if device == "meta" and pkg_version.parse(torch.__version__) < pkg_version.parse("1.10"): pytest.skip("meta tensors only became stable after torch 1.10") - with OnDevice(dtype=torch.half, device=device): + dtype = torch.half + with OnDevice(dtype=dtype, device=device): model = SimpleModel(4) for p in model.parameters(): assert p.device == torch.device(device) - assert p.dtype == torch.half + assert p.dtype == dtype diff --git a/tests/unit/xfail_marker.py b/tests/unit/xfail_marker.py new file mode 100644 index 000000000000..aee098c16b54 --- /dev/null +++ b/tests/unit/xfail_marker.py @@ -0,0 +1,4178 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +hpu_lazy_xfail_tests = {} + +g1_lazy_xfail_tests = { + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-noCG-noTriton]": + "Xfail, due to SW-163095.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163095.", + "unit/inference/test_inference.py::TestModelTask::test[distilgpt2-text-generation-fp32-noCG-noTriton]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilgpt2-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-bf16-noCG-noTriton]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[gpt2-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp32-CG-noTriton]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-noCG-noTriton]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp32-noCG-noTriton]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-bf16-CG-noTriton]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163098.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp16]": + "Xfail, due to SW-162575.", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-fp32-zero3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-bf16-zero3]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_model_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization_nvme_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant_cpu_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_half_int4_quantization": + "float16/half is not supported on Gaudi.", + 
"unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization_cpu_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_quantized_linear": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_half_int8_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant_nvme_offload": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdamW-AdamW]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuSGD-SGD]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdam-Adam]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdamW-AdamW]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdam-Adam]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuSGD-SGD]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-1-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-2-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-2-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-1-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[2-20-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[1-8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[1-20-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[2-8-2000]": + "float16/half is not supported on Gaudi.", + 
"unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[4-20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[4-8-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[4-20-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[1-8-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[2-8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[1-20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[2-20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[4-8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[2-8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[2-20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[4-20-4000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[4-8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[2-8-4000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[4-20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[2-20-4000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[4-8-4000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qgzero.py::TesthpZeroConfigSweep::test[20-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qgzero.py::TesthpZeroConfigSweep::test[8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qgzero.py::TesthpZeroConfigSweep::test[20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qgzero.py::TesthpZeroConfigSweep::test[8-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[8-2048]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[20-2048]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[2]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[1]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[1]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[1]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[4]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[4]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[2]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[2]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[4]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-350m]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-marian]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestLowCpuMemUsage::test[gpt2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-4-1024]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-1-2]": + "float16/half is not supported on Gaudi.", + 
"unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-255-2]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestAutoTP::test[falcon]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-codegen]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe[4]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe.py::TestPRMoE::test[2-True]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe.py::TestPRMoE::test[2-False]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-1-2]": 
+ "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-2-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-2-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-2-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-1-4]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-1-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-1-4]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-1-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-2-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-1-4]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-1-4]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-1-2]": + "Xfail, FP16 not supported.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_subclass_param": + "Xfail, due to SW-156783.", + "unit/runtime/zero/test_zero_context_ancestry.py::TestSerialParamInit::test_subclass_param_init": + "Xfail, due to SW-143227.", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo]": + "Xfail, due to SW-175376.", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-j]": + "Xfail, due to SW-162660.", + "unit/inference/test_inference.py::TestMPSize::test[bf16-gpt-neo]": + "Xfail, due to SW-175376.", + "unit/inference/test_inference.py::TestMPSize::test[bf16-gpt-j]": + "Xfail, due to SW-162660.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-256-52-4-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True0]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True1]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-4096-128-64-3-True-True]": + "CUDA tests not supported by HPU", + 
"unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-120-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-53-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-160-128-2-24-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-8192-128-64-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-511-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[1-256-2048-32-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-54-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-21-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-2-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-2-3-True-True-0.05]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-160-128-2-3-True-True-0.1]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-25-3-True-True-0.05]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-160-128-2-24-False-True-0.2]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-1600-128-2-4-False-True-0.2]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]": + "Xfail, due to SW-166162.", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]": + "Xfail, due to SW-166162.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=1]": + "Xfail, due to SW-167459.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=2]": + "Xfail, due to SW-167459.", + 
"unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=1]": + "Xfail, due to SW-167459.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=2]": + "Xfail, due to SW-167459.", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": + "Xfail, due to SW-170181.", + "unit/runtime/zero/test_zero_offloadpp.py::TestZeroPartialOffloadConfigSweep::test[8-1024]": + "Xfail, due to SW-170288.", + "unit/runtime/zero/test_zero_offloadpp.py::TestZeroPartialOffloadConfigSweep::test[4-1024]": + "Xfail, due to SW-170288.", + "unit/compression/test_dequantization.py::TestDequantization::test_dequantize": + "Xfail, due to SW-168442.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[8-fp16]": + "Xfail, due to SW-145262. Gaudi1 does not support FP16.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp16]": + "Xfail, due to SW-145262. Gaudi1 does not support FP16.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-local-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-local-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-True-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-False-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_model_quantization[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int8_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int4_quantization": + "float16/half is not supported on Gaudi.", + 
"unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-1]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-0]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-1]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-0]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_model_quantization[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[4bits]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-1-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-1-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-2-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-2-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True]": + "Xfail, FP16 not supported.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False]": + "Xfail, FP16 not supported.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False]": + "Xfail, FP16 not supported.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True]": + "Xfail, FP16 not supported.", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[None]": + "Xfail, due to SW-175725.", + "unit/runtime/test_ds_initialize.py::TestConfigOptimizer::test[False]": + "Xfail, due to SW-175725.", + "unit/runtime/test_ds_initialize.py::TestConfigOptimizer::test[True]": + "Xfail, due to SW-175725.", + "unit/runtime/half_precision/test_bf16.py::TestZeroDtypeCocktail::test[default-fp16]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_wrapper.py::TestCustomMethod::test_custom_function": + "Fp16 not supported by Gaudi1", + 
"unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-2-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compiler_fn": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_kwargs": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_disabled": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_custom_backend": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compile_kwargs": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]": + "Not supported on Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]": + "Not supported on Gaudi1", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config1]": + " Comm Init Rank Error.", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config2]": + " Comm Init Rank Error.", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu]": + "Xfail, due to SW-178730.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-False]": + "Fp16 not supported by Gaudi1", + "unit/moe/test_moe.py::TestMoE::test[True-0-4]": + "Xfail, due to SW-180322.", + "unit/moe/test_moe.py::TestMoE::test[False-0-2]": + "Xfail, due to SW-180322.", + "unit/moe/test_moe.py::TestMoE::test[True-0-2]": + "Xfail, due to SW-180322.", + "unit/moe/test_moe.py::TestMoE::test[False-0-4]": + "Xfail, due to SW-180322.", + 
"unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_shared_weights.py::TestCheckpointSharedWeights::test_checkpoint_shared_weights[True]": + "Xfail, due to SW-179861.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[0-True]": + "Xfail, due to SW-179868.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-True]": + "Xfail, due to SW-179868.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-True]": + "Xfail, due to SW-179873.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-True]": + "Fp16 not supported by Gaudi1.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-False]": + "Fp16 not supported by Gaudi1", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-False]": + "Fp16 not supported by Gaudi1", + "unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalescedTensorSmallerThanWorldSize::test": + "fp16 is not supported Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalesced::test_single_input": + "fp16 is not supported Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalesced::test_two_inputs": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_initialize.py::TestNoOptim::test[3]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestGatherUpdate::test": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestScatterGather::test": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context_ancestry.py::TestDSInitWZinit::test": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-full-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-local-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-False]": + "fp16 is not supported Gaudi.", + 
"unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-local-False]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-full-False]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-False]": + "fp16 is not supported Gaudi.", + "unit/runtime/test_data_efficiency.py::TestLegacyCurriculumScheduler::test_fixed_discrete": + "fp16 is not supported Gaudi.", + "unit/runtime/test_data_efficiency.py::TestLegacyCurriculumScheduler::test_fixed_linear": + "fp16 is not supported Gaudi.", + "unit/runtime/test_data_efficiency.py::TestDataEfficiency::test_curriculum_learning": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_hjson": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_dict": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_json": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestDistInit::test": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestDeprecatedDeepScaleConfig::test": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestArgs::test_none_args": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestArgs::test_no_args": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestInitNoOptimizer::test": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_initialize.py::TestNoOptim::test[0]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_ignore_unused_parameters.py::TestStage2IgnoreUnusedParameters::test[True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_ignore_unused_parameters.py::TestStage2IgnoreUnusedParameters::test[False]": + "fp16 is not supported Gaudi.", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[False]": + "fp16 is not supported Gaudi.", + "unit/runtime/test_pld.py::TestNonPLDModel::test_non_pld_model": + "fp16 is not supported Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.1]": + "fp16 is not supported Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[1.0]": + "fp16 is not supported Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.9]": + "fp16 is not supported Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_ext_param_getattr": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[dict]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_ext_param_return": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[tensor]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[None]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-1-full-False]": + "fp16 is not supported Gaudi.", + 
"unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-1-full-False]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-2-full-False]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-2-full-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-1-full-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-1-full-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-2-full-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-2-full-False]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-facebook/opt-350m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-facebook/opt-350m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-EleutherAI/gpt-neo-125m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-bigscience/bloom-560m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-bigscience/bloom-560m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/inference/test_inference_config.py::TestInferenceConfig::test_json_config": + "fp16 is not supported Gaudi.", + "unit/inference/test_inference_config.py::TestInferenceConfig::test_overlap_kwargs": + "fp16 is not supported Gaudi.", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[True]": + "fp16 is not supported Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-j-6B]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-neo-125M]": + "float16/half is not supported on 
Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[bigscience/bloom-560m]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-125m]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_autocast.py::TestAutoCastDisable::test_missing_amp_autocast[True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3InitForParentWeightInitialization::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningManyParams::test[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroPartitionCache::test_training_partition_cache[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroPartitionCache::test_training_partition_cache[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[list]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[tuple]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[dict]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[True-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[False-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_scatter_halftype": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-False]": + 
"float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-False]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-2-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-2-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-1-4]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestPartitionNcclAlignment::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningManyParams::test[False]": + 
"float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadStage1::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[False-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[True-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[True-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[False-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[True-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[True-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[False-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[False-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestIncorectAllgatherBucketSize::test[1001]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestIncorectAllgatherBucketSize::test[1000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[False-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningBase::test_fp16_enabled[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadOptim::test[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadOptim::test[False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-False]": + "float16/half is not supported on Gaudi.", 
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-False]": + "float16/half is 
not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-True]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-True]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[1-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[2-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[1-9-1024]": + "float16/half is not supported on Gaudi.", + 
"unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[2-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton]": + "Xfail due to SW-182748", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-182748", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]": + "Xfail due to SW-182749", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_non_divisible": + "float16/half is not supported on Gaudi.", + 
"unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_1d_tensor": + "float16/half is not supported on Gaudi.", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu:0]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestParamPartitioningSkipInit::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_no_grad_input_error": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_choose_module_by_counter": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_choose_module_by_rank": + "float16/half is not supported on Gaudi.", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to Romanian: \"']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]": + "Xfail due to SW-182753", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl]": + "xfail due to model download", +} + +g2_lazy_xfail_tests = { + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-bf16-noCG-noTriton]": + 
"Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163095.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp16-noCG-noTriton]": + "Xfail, due to SW-163095.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-noCG-noTriton]": + "Xfail, due to SW-163095.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp16-noCG-noTriton]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp16-noCG-noTriton]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp32-CG-noTriton]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp32-noCG-noTriton]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-noCG-noTriton]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-bf16-noCG-noTriton]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-bf16-CG-noTriton]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[gpt2-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp16-CG-noTriton]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-False]": # noqa: F601 + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-True]": # noqa: F601 + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False]": # noqa: F601 + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-True]": # noqa: F601 + "Xfail, due to SW-163097.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-True-True]": + "CUDA tests not supported by HPU", + 
"unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-2-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-120-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-256-52-4-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-8192-128-64-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-53-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-160-128-2-24-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-4096-128-64-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True0]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[1-256-2048-32-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-21-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True1]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-54-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-511-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-2-3-True-True-0.05]": + "CUDA tests not supported by HPU", + 
"unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-1600-128-2-4-False-True-0.2]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-160-128-2-3-True-True-0.1]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-25-3-True-True-0.05]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-160-128-2-24-False-True-0.2]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-j]": + "Xfail, due to SW-162660.", + "unit/inference/test_inference.py::TestMPSize::test[bf16-gpt-neo]": + "Xfail, due to SW-175376.", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo]": + "Xfail, due to SW-175376.", + "unit/inference/test_inference.py::TestMPSize::test[bf16-gpt-j]": + "Xfail, due to SW-162660.", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom]": + "Xfail, due to SW-175376.", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-j]": + "Xfail, due to SW-162660.", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo]": + "Xfail, due to SW-175376.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-4-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-4-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-9-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-4-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-9-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/half_precision/test_fp16.py::TestAdamFP16ZeroOneCycleCompatibility::test[True-1]": + "Xfail, due to SW-145262.", + "unit/runtime/half_precision/test_fp16.py::TestAdamFP16ZeroOneCycleCompatibility::test[True-2]": + "Xfail, due to SW-145262.", + "unit/runtime/half_precision/test_fp16.py::TestAdamFP16ZeroOneCycleCompatibility::test[True-3]": + "Xfail, due to SW-145262.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp16]": + "Xfail, due to SW-162575.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant_cpu_offload": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization": + 
"Xfail, due to SW-162660.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization_cpu_offload": + "Xfail, due to SW-162660.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_subclass_param": + "Xfail, due to SW-156783.", + "unit/runtime/zero/test_zero_context_ancestry.py::TestSerialParamInit::test_subclass_param_init": + "Xfail, due to SW-143227.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-1-dtype1]": + "Xfail, due to SW-145262.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-dtype1]": + "Xfail, due to SW-145262.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-2-dtype1]": + "Xfail, due to SW-145262.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization_nvme_offload": + "Xfail, due to SW-164545.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant_nvme_offload": + "Xfail, due to SW-164545.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuSGD-SGD]": + "Xfail, due to SW-164551.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuSGD-SGD]": + "Xfail, due to SW-164551.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdam-Adam]": + "Xfail, due to SW-164551.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdam-Adam]": + "Xfail, due to SW-164551.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdamW-AdamW]": + "Xfail, due to SW-164551.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdamW-AdamW]": + "Xfail, due to SW-164551.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[4]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[4]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[1]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[2]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[4]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[1]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[2]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[1]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[2]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-False]": + "Xfail, due to SW-164593.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-dtype1]": + "Xfail, due to SW-164593.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_quantized_linear": + "Xfail, due to SW-164606.", + 
"unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[20-1024]": + "Xfail, due to SW-156782.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[20-2048]": + "Xfail, due to SW-156782.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[8-2048]": + "Xfail, due to SW-156782.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[8-1024]": + "Xfail, due to SW-156782.", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[EleutherAI/gpt-j-6b-text-generation-fp32-noCG-noTriton]": + "Xfail, due to SW-163098.", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": # noqa: F601 + "Xfail, due to SW-170181.", + "unit/compression/test_dequantization.py::TestDequantization::test_dequantize": # noqa: F601 + "Xfail, due to SW-168442.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-full-dtype1]": + "Xfail, due to SW-164593.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-local-dtype1]": + "Xfail, due to SW-164593.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[8-fp16]": # noqa: F601 + "Xfail, due to SW-145262.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp16]": + "Xfail, due to SW-145262.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-full-dtype1]": # noqa: F601 + "Xfail, due to SW-164593.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-local-dtype1]": # noqa: F601 + "Xfail, due to SW-164593.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-0]": + "Xfail, due to SW-168583.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-1]": + "Xfail, due to SW-168583.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-1]": + "Xfail, due to SW-168583.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-0]": + "Xfail, due to SW-168583.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[4bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[4bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[8bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[4bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[8bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[4bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[8bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[8bits]": + "Xfail, due to SW-162660.", + 
"unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "Xfail, due to SW-164545.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "Xfail, due to SW-164545.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-False]": + "Xfail, due to SW-138014.", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[None]": + "Xfail, due to SW-175725.", + "unit/runtime/test_ds_initialize.py::TestConfigOptimizer::test[False]": + "Xfail, due to SW-175725.", + "unit/runtime/test_ds_initialize.py::TestConfigOptimizer::test[True]": + "Xfail, due to SW-175725.", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-False-resulting_optimizer0]": "Xfail due to SW-175725", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-True-resulting_optimizer12]": "Xfail due to SW-175725", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-True-resulting_optimizer4]": "Xfail due to SW-175725", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-False-resulting_optimizer8]": "Xfail due to SW-175725", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-True-DeepSpeedCPULion]": + "skipping due to HPU is not supported FusedLion, SW-176903", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-False-FusedLion]": + "skipping due to HPU is not supported FusedLion, SW-176903", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-509-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-24-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-381-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-56-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + 
"unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-51-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-119-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu]" : "Xfail, due to SW-178730.", + "unit/moe/test_moe.py::TestMoE::test[True-0-4]" : "Xfail, due to SW-180322.", + "unit/moe/test_moe.py::TestMoE::test[False-0-2]" : "Xfail, due to SW-180322.", + "unit/moe/test_moe.py::TestMoE::test[True-0-2]" : "Xfail, due to SW-180322.", + "unit/moe/test_moe.py::TestMoE::test[False-0-4]" : "Xfail, due to SW-180322.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_shared_weights.py::TestCheckpointSharedWeights::test_checkpoint_shared_weights[True]" : "Xfail, due to SW-179861.", + 
"unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-True]" : "Xfail, due to SW-179867.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-True]" : "Xfail, due to SW-179867.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-True]" : "Xfail, due to SW-179867.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-True]" : "Xfail, due to SW-179867.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[0-True]" : "Xfail, due to SW-179868.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-True]" : "Xfail, due to SW-179868.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-True]" : "Xfail, due to SW-175716.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype1]" : "Xfail, due to SW-175716.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]" : "Xfail, due to SW-175716.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype1]" : "Xfail, due to SW-175716.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]" : "Xfail, due to SW-175716.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-True]" : "Xfail, due to SW-180488.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-True]" : "Xfail, due to SW-180488.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-True]" : "Xfail, due to SW-180488.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-True]" : "Xfail, due to SW-180488.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-True]" : "Xfail, due to SW-179873.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-True]" : "Xfail, due to SW-179873.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-True]" : "Xfail due to SW-164593", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-local-True]" : "Xfail due to SW-164593", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-full-False]" : "Xfail due to SW-164593", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-True]" : "Xfail due to SW-164593", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-local-False]" : "Xfail due to SW-164593", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-full-True]" : "Xfail due to SW-164593", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-False]" : "Xfail due to SW-164593", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-False]" : "Xfail due to SW-164593", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-False]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-False]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-False]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-False]" : "Xfail, due to SW-175716.", + "unit/runtime/zero/test_zero.py::TestZeroPartitionCache::test_training_partition_cache[True]" : "Xfail, due to SW-175716.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[dict]" : "Xfail, due to SW-175716.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[tuple]" : "Xfail, due to SW-175716.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[list]" : "Xfail, due to SW-175716.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[3]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-False]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-False]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-False]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-False]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-False]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-False]" : "Xfail, due to 
SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-False]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-False]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fp32_optimizer[True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-True]" : "Xfail, due to SW-180868.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest[True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-True]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-True]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-True]" : "Xfail, due to SW-175716.", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton]":"Xfail due to SW-182748", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-fp16-noCG-noTriton]":"Xfail due to SW-182748", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton]":"Xfail due to SW-182748", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton]":"xfail due to 
SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton]":"xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton]":"xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-bf16-noCG-noTriton]":"Xfail due to SW-181935", + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]":"Xfail due to SW-182749", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]":"Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to Romanian: \"']":"Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']":"Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]":"Xfail due to SW-182753", + "unit/runtime/zero/test_zero.py::TestParamPartitioningSkipInit::test":"Xfail due to SW-181939", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test[True]":"Xfail due to SW-181939", + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]":"Xfail due to SW-182759", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_1d_tensor":"Xfail due to SW-182766", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_non_divisible":"Xfail due to SW-182766", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl]":"xfail due to model download", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[3]":"XFail due to SW-181940", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton]":"Xfail due to SW-181935", + +} + +hpu_eager_xfail_tests = {} + +g1_eager_xfail_tests = { + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": + "Xfail, due to SW-170181.", + "unit/runtime/test_autocast.py::TestAutoCastDisable::test_missing_amp_autocast[True]": + 
"float16/half is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalescedTensorSmallerThanWorldSize::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalesced::test_two_inputs": + "float16/half is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalesced::test_single_input": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp16]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp16]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp16]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp16]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp16]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_initialize.py::TestNoOptim::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-fp32-zero3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-bf16-zero3]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[8-fp16]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp16]": + "float16/half is not supported on Gaudi.", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-1]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int8_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_model_quantization[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_model_quantization[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int4_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "float16/half is not supported on Gaudi.", + 
"unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-1]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-0]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-0]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[dict]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[tuple]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[list]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3InitForParentWeightInitialization::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[False-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[True-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningManyParams::test[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroPartitionCache::test_training_partition_cache[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroPartitionCache::test_training_partition_cache[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestScatterGather::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestGatherUpdate::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_scatter_halftype": + 
"float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_subclass_param": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_ancestry.py::TestDSInitWZinit::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_ancestry.py::TestSerialParamInit::test_subclass_param_init": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-local-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-local-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-full-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-full-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-False]": + "float16/half is not supported 
on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-local-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-local-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/half_precision/test_bf16.py::TestZeroDtypeCocktail::test[default-fp16]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_data_efficiency.py::TestDataEfficiency::test_curriculum_learning": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_data_efficiency.py::TestLegacyCurriculumScheduler::test_fixed_linear": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_data_efficiency.py::TestLegacyCurriculumScheduler::test_fixed_discrete": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestDeprecatedDeepScaleConfig::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestDistInit::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_hjson": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_json": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_dict": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestArgs::test_no_args": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestArgs::test_none_args": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestInitNoOptimizer::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_initialize.py::TestNoOptim::test[0]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_ignore_unused_parameters.py::TestStage2IgnoreUnusedParameters::test[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_ignore_unused_parameters.py::TestStage2IgnoreUnusedParameters::test[True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-False]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-False]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-0-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-0-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-0-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-0-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-2-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-2-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestPRMoE::test[2-False]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestPRMoE::test[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe[4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-1-4]": + "float16/half is not supported on Gaudi.", + 
"unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-2-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdamW-AdamW]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuSGD-SGD]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuSGD-SGD]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdamW-AdamW]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdam-Adam]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdam-Adam]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_pld.py::TestNonPLDModel::test_non_pld_model": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.9]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[1.0]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestPartitionNcclAlignment::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadOptim::test[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadOptim::test[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-True-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-False-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningBase::test_fp16_enabled[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[3]": + "float16/half is not supported on Gaudi.", + 
"unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestIncorectAllgatherBucketSize::test[1000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestIncorectAllgatherBucketSize::test[1001]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[True-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[True-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[False-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[False-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[False-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[True-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[False-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[True-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[False-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningManyParams::test[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadStage1::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_ext_param_getattr": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[tensor]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[None]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[dict]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_ext_param_return": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_offloadpp.py::TestZeroPartialOffloadConfigSweep::test[4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_offloadpp.py::TestZeroPartialOffloadConfigSweep::test[8-1024]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-False]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-1-full-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-2-full-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-2-full-False]": + "float16/half is not supported on Gaudi.", + 
"unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-2-full-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-1-full-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-1-full-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-2-full-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-1-full-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-1-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-2-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-1-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-2-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-False]": + "float16/half is not supported on Gaudi.", + "unit/compression/test_dequantization.py::TestDequantization::test_dequantize": + "Xfail, due to SW-168442.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-True]": + 
"float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compile_kwargs": + 
"float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_disabled": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compiler_fn": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_kwargs": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_custom_backend": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_wrapper.py::TestCustomMethod::test_custom_function": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-True]": + 
"float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-True]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[0-True]": + "Xfail, due to SW-179868.", + 
"unit/checkpoint/test_shared_weights.py::TestCheckpointSharedWeights::test_checkpoint_shared_weights[True]": + "Xfail, due to SW-179861.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-j-6B]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-neo-125M]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[bigscience/bloom-560m]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-125m]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-350m]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference_config.py::TestInferenceConfig::test_json_config": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference_config.py::TestInferenceConfig::test_overlap_kwargs": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-1-1]": + "float16/half is not supported 
on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-255-2]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=2]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=2]": + "float16/half is not supported on 
Gaudi.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-bigscience/bloom-560m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-facebook/opt-350m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-bigscience/bloom-560m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-facebook/opt-350m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-EleutherAI/gpt-neo-125m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestMPSize::test[bf16-gpt-neo]": + "Xfail, due to SW-175376.", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo]": + "Xfail, due to SW-175376.", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to Romanian: \"']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]": + "Xfail due to SW-182753", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[1-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[2-9-1024]": + "float16/half is not supported on Gaudi.", + 
"unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[1-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[2-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_1d_tensor": + "float16/half is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_non_divisible": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-None]": + "float16/half is not supported on Gaudi.", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu:0]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestParamPartitioningSkipInit::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_choose_module_by_counter": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_choose_module_by_rank": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_no_grad_input_error": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton]": + "Xfail due to SW-182748", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-182748", + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]": + "Xfail due to SW-182749", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-full-dtype2]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-full-dtype0]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-local-dtype2]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-local-dtype0]": + "Xfail due to SW-181951", + "unit/inference/test_inference.py::TestInjectionPolicy::test[fp32-t5]": + "Xfail, due to SW-182668", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian]": + "Xfail, due to SW-182669", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian]": + "Xfail, due to SW-182670", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[openai-community/gpt2-text-generation-fp32-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[facebook/opt-350m-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[facebook/opt-125m-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[openai-community/gpt2-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-noTriton]": + "Xfail due to SW-182671", + 
"unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config2]": + "Xfail due to SW-182509", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config1]": + "Xfail due to SW-182509", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_bf16_fragments[False]": + "Xfail, due to SW-182511", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype2]": + "Xfail due to SW-181951", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype0]": + "Xfail due to SW-181951", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]": + "Xfail due to OP not implemented on HPU", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]": + "Xfail due to OP not implemented on HPU", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl]": + "xfail due to model download", +} + +g2_eager_xfail_tests = { + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton]": + "Xfail, due to 
SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": + "Xfail, due to SW-170181.", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-160-128-2-24-False-True-0.2]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-1600-128-2-4-False-True-0.2]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-2-3-True-True-0.05]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-25-3-True-True-0.05]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-160-128-2-3-True-True-0.1]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-True-False]": + "Xfail, due to SW-176905.", + "unit/compression/test_dequantization.py::TestDequantization::test_dequantize": + "Xfail, due to SW-168442.", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu]": + "Xfail, due to SW-178730.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "Xfail, due to SW-164545.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "Xfail, due to SW-164545.", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-True-DeepSpeedCPULion]": + "Xfail, due to SW-176903.", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-False-FusedLion]": + "Xfail, due to SW-176903.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-True]": + "Xfail, due to SW-179864.", + 
"unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[0-True]": + "Xfail, due to SW-179868.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-True]": + "Xfail, due to SW-179868.", + "unit/checkpoint/test_shared_weights.py::TestCheckpointSharedWeights::test_checkpoint_shared_weights[True]": + "Xfail, due to SW-179861.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-56-16-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-119-16-3-True-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-True]": + "Xfail, due to SW-176905.", + 
"unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-2-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-509-16-3-True-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-120-16-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[1-256-2048-32-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-256-52-4-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-511-16-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-54-16-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True0]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True1]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-53-16-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-381-16-3-True-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-160-128-2-24-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-4096-128-64-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-24-16-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-False]": + "Xfail, due to SW-176905.", + 
"unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-21-16-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-8192-128-64-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-51-16-3-True-False]": + "Xfail, due to SW-176905.", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo]": + "Xfail, due to SW-175376.", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom]": + "Xfail, due to SW-175376.", + "unit/inference/test_inference.py::TestMPSize::test[bf16-gpt-neo]": + "Xfail, due to SW-175376.", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo]": + "Xfail, due to SW-175376.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-True]": + "Xfail, due to SW-163097.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-9-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-4-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-9-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-4-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-4-1024]": + "Xfail, due to SW-164239.", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to Romanian: \"']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]": + "Xfail due to SW-182753", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton]": + "Xfail due to SW-182748", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-fp16-noCG-noTriton]": + "Xfail due to SW-182748", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-182748", + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]": + "Xfail due to SW-182749", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]": + "Xfail due to SW-181951", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=3-bsz=1]": + "Xfail due to 
SW-181951", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=3-bsz=1]": + "Xfail due to SW-181951", + "unit/runtime/half_precision/test_fp16.py::TestAdamFP16ZeroOneCycleCompatibility::test[True-3]": + "Xfail due to SW-181951", + "unit/runtime/half_precision/test_fp16.py::TestZeroStaticScale::test[True-3]": + "Xfail due to SW-181951", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-False]": + "Xfail due to SW-181951", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-False]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_offloadpp.py::TestZeroPartialOffloadConfigSweep::test[4-1024]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_offloadpp.py::TestZeroPartialOffloadConfigSweep::test[8-1024]": + "Xfail due to SW-181951", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-False]": + "Xfail due to SW-181951", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-False]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-local-dtype1]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-local-dtype0]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-full-dtype1]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-full-dtype0]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-local-dtype2]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-full-dtype2]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-True]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-False]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-False]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-True]": + "Xfail due to SW-181951", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton]": + 
"Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-CG-noTriton]": 
+ "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton]": + "Xfail due to SW-163097", 
+ "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_1d_tensor": + "Xfail due to SW-182766", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_non_divisible": + "Xfail due to SW-182766", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-181935", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-181935", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-codegen]": + "Xfail, due to SW-178702", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp16]": + "Xfail, due to SW-182502", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_float_int4_quantization": + "Xfail, due to SW-182501", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_float_int8_quantization": + "Xfail, due to SW-182501", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype2]": + "Xfail due to SW-181951", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype0]": + "Xfail due to SW-181951", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype1]": + "Xfail due to SW-181951", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-True]": + "Xfail due to SW-181951", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]": + "Xfail due to op not been implemented on HPU", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]": + "Xfail due to op not 
been implemented on HPU", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]": + "Xfail due to op not been implemented on HPU", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl]": + "xfail due to model download", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[3]": + "Xfail due to SW-181940", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test[True]": + "Xfail due to SW-181939", +} + +gpu_xfail_tests = { + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]": + "Test requires higher memory.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant_nvme_offload": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization_nvme_offload": + "Xfailed. failure observed on vanilla as well.", + "unit/ops/quantizer/test_fake_quantization.py::test_fake_quant_dequant[16-tensor_shape0]": + "Xfailed. failure observed on vanilla as well.", + "unit/ops/quantizer/test_fake_quantization.py::test_fake_quant_dequant[1-tensor_shape0]": + "Xfailed. failure observed on vanilla as well.", + "unit/ops/quantizer/test_fake_quantization.py::test_fake_quant_dequant[16-tensor_shape1]": + "Xfailed. failure observed on vanilla as well.", + "unit/ops/quantizer/test_fake_quantization.py::test_fake_quant_dequant[1-tensor_shape1]": + "Xfailed. failure observed on vanilla as well.", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]": + "Test requires higher memory.", + "unit/inference/v2/kernels/ragged_ops/test_atom_builder.py::test_single_sequence[seq_params2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_atom_builder.py::test_single_sequence[seq_params0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_atom_builder.py::test_single_sequence[seq_params3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_atom_builder.py::test_single_sequence[seq_params1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_multiple_prompts[prompt_lengths3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_multiple_prompts[prompt_lengths1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_continuation[seq_params1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[2037]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_rotary_emb[False]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_gqa[head_config0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_rotary_emb[True]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[65]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[128]": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[256]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_head_size[128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[33]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_continuation[seq_params4]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_gqa[head_config2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_head_size[64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_multiple_prompts[prompt_lengths2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_fully_composed": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_gqa[head_config1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_multiple_prompts[prompt_lengths0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_continuation[seq_params0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_continuation[seq_params3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_continuation[seq_params2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_multiple_blocks[177-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_multiple_blocks[117-88]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_single_block[33-8]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_multiple_blocks[169-8]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_single_block[17-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_multiple_blocks[128-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_multi_sequence": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_single_block[1-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_single_block[63-1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[False-169-8]": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_multi_sequences[True]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[False-1-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[True-169-8]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[True-1-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[False-177-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_multi_sequences[False]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[True-33-15]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[True-17-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[False-33-15]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[False-128-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[True-117-88]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[False-17-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[False-1-63]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[True-128-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[False-117-88]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[True-1-63]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[True-177-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_supported_dtypes[dtype0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_problem_size_permutations[1024]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_multiple_sequences[seq_lens0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_problem_size_permutations[6144]": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_multiple_sequences[seq_lens3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_supported_dtypes[dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_problem_size_permutations[6784]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_multiple_sequences[seq_lens2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_multiple_sequences[seq_lens1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[False-278-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[False-13-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[False-1977-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[True-278-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[True-13-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[True-1977-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_multi_expert[1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[True-13-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[False-13-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[True-1977-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[True-278-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[False-278-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[False-1977-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_positional_embedding[seq_lens0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_problem_size_permutations[50304-6144]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_dtype_permutations[embed_dtype1-token_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_positional_embedding[seq_lens1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_complex_sequences[True-seq_lens1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_positional_embedding_offset": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_problem_size_permutations[32000-5120]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_complex_sequences[True-seq_lens0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_problem_size_permutations[1024-1024]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_positional_embedding[seq_lens3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_dtype_permutations[embed_dtype0-token_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_complex_sequences[False-seq_lens0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_dtype_permutations[embed_dtype0-token_dtype0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_positional_embedding[seq_lens2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_dtype_permutations[embed_dtype1-token_dtype0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_complex_sequences[False-seq_lens1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_single_mapping_gating[433-128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_score_accuracy[32-128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_negative_logits": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_score_accuracy[89-128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_single_mapping_gating[32-128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_single_mapping_gating[89-128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_single_mapping_gating[17-16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_single_mapping_gating[1-16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_score_accuracy[433-2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_score_accuracy[17-16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_determinism": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_score_accuracy[1-16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape0-fp_dtype1]": + "Xfailed. 
failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape4-fp_dtype1]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape7-fp_dtype1]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape5-fp_dtype1]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape1-fp_dtype1]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape3-fp_dtype1]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape2-fp_dtype1]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape4-fp_dtype1]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape3-fp_dtype1]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape6-fp_dtype1]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape5-fp_dtype1]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape7-fp_dtype1]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape6-fp_dtype1]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape1-fp_dtype1]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape2-fp_dtype1]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape0-fp_dtype1]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_multiple_prompts[prompt_lengths3]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[256]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_gqa[head_config0]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_continuation[seq_params2]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_multiple_prompts[prompt_lengths1]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[65]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_continuation[seq_params0]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_head_size[128]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_continuation[seq_params4]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[2]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_fully_composed":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_head_size[64]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_continuation[seq_params1]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_multiple_prompts[prompt_lengths0]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_continuation[seq_params3]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_gqa[head_config2]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[128]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[33]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[2037]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_multiple_prompts[prompt_lengths2]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_gqa[head_config1]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/modules/test_cutlass_moe.py::test_expert_variance[64]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/modules/test_cutlass_moe.py::test_in_out_channels[2048-8192]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/modules/test_cutlass_moe.py::test_expert_variance[32]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/modules/test_cutlass_moe.py::test_activation_types[ActivationType.RELU]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/modules/test_cutlass_moe.py::test_dtypes[dtype0]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/modules/test_cutlass_moe.py::test_activation_types[ActivationType.GELU]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/modules/test_cutlass_moe.py::test_activation_types[ActivationType.SILU]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/modules/test_cutlass_moe.py::test_successive_inputs":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/modules/test_cutlass_moe.py::test_in_out_channels[4096-2048]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/modules/test_cutlass_moe.py::test_in_out_channels[6144-3072]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/modules/test_cutlass_moe.py::test_expert_variance[2]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_dtypes[DtypeEnum.bf16]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_act_fns[ActivationType.GELU]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_dtypes[DtypeEnum.fp16]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_single_expert[13-2048-2048]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_act_fns[ActivationType.SILU]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_multi_expert[64]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_single_expert[256-1024-4096]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_multi_expert[4]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_single_expert[893-5120-2560]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_multi_expert[16]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_multi_expert[128]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_act_fns[ActivationType.RELU]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_single_expert[278-5120-2048]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_multi_expert[1]":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload":
+ "Xfailed. failure observed on vanilla as well.",
+ "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-8192-128-64-3-False-True]":
+ "Test requires higher memory.",
+ "unit/ops/adam/test_adamw/TestAdamConfigs/test[AdamW-True-False-True-resulting_optimizer6]":
+ "Xfail, due to SW-176845",
+ "unit/ops/adam/test_adamw/TestAdamConfigs/test[AdamW-True-False-False-resulting_optimizer2]":
+ "Xfail, due to SW-176845",
+ "unit/ops/adam/test_adamw/TestAdamConfigs/test[Adam-True-False-True-resulting_optimizer14]":
+ "Xfail, due to SW-176845",
+ "unit/ops/adam/test_adamw/TestAdamConfigs/test[Adam-True-False-False-resulting_optimizer10]":
+ "Xfail, due to SW-176845",
+ "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo]":
+ "Xfail due to SW-177890 and SW-177889",
+ "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=1]":
+ "Xfail due to SW-177889",
+ "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=2]":
+ "Xfail due to SW-177889",
+ "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=2]":
+ "Xfail due to SW-177889",
+ "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=1]":
+ "Xfail due to SW-177889",
+ "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo]":
+ "Xfail due to SW-177889",
+ "unit/inference/test_inference.py::TestLowCpuMemUsage::test[gpt2]":
+ "Xfail due to SW-177889",
+ "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-False]":
+ "Xfail due to SW-177889",
+ "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-True]":
+ "Xfail due to SW-177889",
+ "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False]":
+ "Xfail due to SW-177889",
+ "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-True]":
+ "Xfail due to SW-177889",
+ "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=2-bsz=1]":
+ "Xfail due to SW-177891",
+ "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=2-bsz=1]":
+ "Xfail due to SW-177891",
+ "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=3-bsz=1]":
+ "Xfail due to SW-177891",
+ "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]":
+ "Xfail due to SW-177891",
+ "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=3-bsz=1]":
+ "Xfail due to SW-177891",
+ "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=2-bsz=1]":
+ "Xfail due to SW-177891",
+ "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype2]":
+ "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported",
+ "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]":
+ "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported",
+ "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype1]":
+ "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported",
+ "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype1]":
+ "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported",
+ "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype1]":
+ "Xfail due to pytorch>2.0 is required",
+ "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]":
+ "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported",
+ "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype2]":
+ "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported",
+ "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype1]":
+ "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported",
+ "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype1]":
+ "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported",
+ "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype2]":
+ "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported",
+ "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype2]":
+ "Xfail due to pytorch>2.0 is required",
+ "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype2]":
+ "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported",
+ "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype1]":
+ "Xfail due to pytorch>2.0 is required",
+ "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype2]":
+ "Xfail due to pytorch>2.0 is required",
+ "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile":
+ "Nvidia Titan XP GPU not supported",
+ "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compile_kwargs":
+ "Nvidia Titan XP GPU not supported",
+ "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compiler_fn":
+ "Nvidia Titan XP GPU not supported",
+ "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_kwargs":
+ "Nvidia Titan XP GPU not supported",
+ "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=2]":
+ "Xfail due to SW-177889",
+ "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=1]":
+ "Xfail due to SW-177889",
+ "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=1]":
+ "Xfail due to SW-177889",
+ "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=2]":
+ "Xfail due to SW-177889",
+ "unit/inference/test_inference.py::TestLowCpuMemUsage::test[gpt2]":
+ "Xfail due to SW-177889",
+ "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-True]":
+ "Xfail due to SW-177889",
+ "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-True]":
+ "Xfail due to SW-177889",
+ "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False]":
+ "Xfail due to SW-177889",
+ "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-False]":
+ "Xfail due to SW-177889",
+ "unit/inference/v2/ragged/test_manager_configs.py::test_too_small_max_ragged_batch_size":
+ "Xfail due to ValidationError if the input data cannot be parsed to form a valid model",
+ "unit/inference/v2/ragged/test_manager_configs.py::test_zero_max_tracked_sequences":
+ "Xfail due to ValidationError if the input data cannot be parsed to form a valid model",
+ "unit/inference/v2/ragged/test_manager_configs.py::test_zero_max_ragged_batch_size":
+ "Xfail due to ValidationError if the input data cannot be parsed to form a valid model",
+ "unit/inference/v2/ragged/test_manager_configs.py::test_negative_max_ragged_batch_size":
+ "Xfail due to ValidationError if the input data cannot be parsed to form a valid model",
+ "unit/inference/v2/ragged/test_manager_configs.py::test_too_small_max_tracked_sequences":
+ "Xfail due to ValidationError if the input data cannot be parsed to form a valid model",
+ "unit/inference/v2/ragged/test_manager_configs.py::test_negative_max_tracked_sequences":
+ "Xfail due to ValidationError if the input data cannot be parsed to form a valid model",
+ "unit/inference/v2/ragged/test_manager_configs.py::test_zero_max_ragged_sequence_count":
+ "Xfail due to ValidationError if the input data cannot be parsed to form a valid model",
+ "unit/inference/v2/ragged/test_manager_configs.py::test_negative_max_ragged_sequence_count":
+ "Xfail due to ValidationError if the input data cannot be parsed to form a valid model",
+ "unit/runtime/test_ds_initialize.py::TestNoOptim::test[0]":
+ "Xfail due to OOM",
+ "unit/runtime/test_ds_initialize.py::TestNoOptim::test[3]":
+ "Xfail due to OOM",
+ "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[Callable]":
+ "Xfail due to OOM",
+ "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[Optimizer]":
+ "Xfail due to OOM",
+ "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[None]":
+ "Xfail due to OOM",
+ "unit/runtime/test_ds_initialize.py::TestConfigOptimizer::test[False]":
+ "Xfail due to OOM",
+ "unit/runtime/test_ds_initialize.py::TestConfigOptimizer::test[True]":
+ "Xfail due to OOM",
+ "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest[True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_unfused_optimizer[True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fp32_optimizer[True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-True]":
+ "Compile tests not supported on Titan-XP",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-True]":
+ "Xfail, due to SW-179864.",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-True]":
+ "Xfail, due to SW-179864.",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-True]":
+ "Xfail, due to SW-179864.",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-True]":
+ "Xfail, due to SW-179864.",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-True]":
+ "Xfail, due to SW-179864.",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-True]":
+ "Xfail, due to SW-179864.",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-True]":
+ "Xfail, due to SW-179864.",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-True]":
+ "Xfail, due to SW-179864.",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-True]":
+ "Xfail, due to SW-179864.",
+ "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-True]":
+ "Xfail, due to SW-179873.",
+ "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-True]":
+ "Xfail, due to SW-179873.",
+ "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-True]":
+ "Xfail, due to SW-179873.",
+ "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-True]":
+ "Xfail, due to SW-179873.",
+ "unit/checkpoint/test_shared_weights.py::TestCheckpointSharedWeights::test_checkpoint_shared_weights[True]":
+ "Xfail, due to SW-179861.",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-True]":
+ "Xfail, due to SW-179873.",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-True]":
+ "Xfail, due to SW-179873.",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-True]":
+ "Xfail, due to SW-179873.",
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-True]":
+ "Xfail, due to SW-179873.",
+ "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]":
+ "Xfail due to SW-182759",
+ "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]":
+ "Xfail due to SW-182749",
+ "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]":
+ "Xfail due to SW-182753",
+ "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to Romanian: \"']":
+ "Xfail due to SW-182753",
+ "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']":
+ "Xfail due to SW-182753",
+ "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]":
+ "Xfail due to SW-182753",
+ "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-None]":
+ "Cuda OOM",
+ "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-None]":
+ "Cuda OOM",
+ "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-_LRScheduler]":
+ "Cuda OOM",
+ "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-_LRScheduler]":
+ "Cuda OOM",
+ "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Callable-Callable]":
+ "Cuda OOM",
+ "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Callable-None]":
+ "Cuda OOM",
+ "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-Callable]":
+ "Cuda OOM",
+ "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-Callable]":
+ "Cuda OOM",
+ "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Callable-_LRScheduler]":
+ "Cuda OOM",
+ "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[3]":
+ "XFail due to SW-181940",
+}